nio4r-2.7.3/0000755000004100000410000000000014632135320012605 5ustar www-datawww-datanio4r-2.7.3/nio4r.gemspec0000644000004100000410000001250214632135320015205 0ustar www-datawww-data######################################################### # This file has been automatically generated by gem2tgz # ######################################################### # -*- encoding: utf-8 -*- # stub: nio4r 2.7.3 ruby lib # stub: ext/nio4r/extconf.rb Gem::Specification.new do |s| s.name = "nio4r".freeze s.version = "2.7.3" s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= s.metadata = { "bug_tracker_uri" => "https://github.com/socketry/nio4r/issues", "changelog_uri" => "https://github.com/socketry/nio4r/blob/main/changes.md", "documentation_uri" => "https://www.rubydoc.info/gems/nio4r", "funding_uri" => "https://github.com/sponsors/ioquatix/", "source_code_uri" => "https://github.com/socketry/nio4r.git", "wiki_uri" => "https://github.com/socketry/nio4r/wiki" } if s.respond_to? :metadata= s.require_paths = ["lib".freeze] s.authors = ["Tony Arcieri".freeze, "Samuel Williams".freeze, "Olle Jonsson".freeze, "Gregory Longtin".freeze, "Tiago Cardoso".freeze, "Joao Fernandes".freeze, "Thomas Dziedzic".freeze, "Boaz Segev".freeze, "Logan Bowers".freeze, "Pedro Paiva".freeze, "Jun Aruga".freeze, "Omer Katz".freeze, "Upekshe Jayasekera".freeze, "Tim Carey-Smith".freeze, "Benoit Daloze".freeze, "Sergey Avseyev".freeze, "Tomoya Ishida".freeze, "Usaku Nakamura".freeze, "C\u00E9dric Boutillier".freeze, "Daniel Berger".freeze, "Dirkjan Bussink".freeze, "Hiroshi Shibata".freeze, "Jes\u00FAs Burgos Maci\u00E1".freeze, "Luis Lavena".freeze, "Pavel Rosick\u00FD".freeze, "Sadayuki Furuhashi".freeze, "Stephen von Takach".freeze, "Vladimir Kochnev".freeze, "V\u00EDt Ondruch".freeze, "Anatol Pomozov".freeze, "Bernd Ahlers".freeze, "Charles Oliver Nutter".freeze, "Denis Washington".freeze, "Elad Eyal".freeze, "Jean byroot Boussier".freeze, "Jeffrey Martin".freeze, "John Thornton".freeze, "Jun Jiang".freeze, "Lars Kanis".freeze, "Marek Kowalcze".freeze, "Maxime Demolin".freeze, "Orien Madgwick".freeze, "Pavel Lobashov".freeze, "Per Lundberg".freeze, "Phillip Aldridge".freeze, "Ravil Bayramgalin".freeze, "Shannon Skipper".freeze, "Tao Luo".freeze, "Thomas Kuntz".freeze, "Tsimnuj Hawj".freeze, "Zhang Kang".freeze] s.cert_chain = ["-----BEGIN 
CERTIFICATE-----\nMIIE2DCCA0CgAwIBAgIBATANBgkqhkiG9w0BAQsFADBhMRgwFgYDVQQDDA9zYW11\nZWwud2lsbGlhbXMxHTAbBgoJkiaJk/IsZAEZFg1vcmlvbnRyYW5zZmVyMRIwEAYK\nCZImiZPyLGQBGRYCY28xEjAQBgoJkiaJk/IsZAEZFgJuejAeFw0yMjA4MDYwNDUz\nMjRaFw0zMjA4MDMwNDUzMjRaMGExGDAWBgNVBAMMD3NhbXVlbC53aWxsaWFtczEd\nMBsGCgmSJomT8ixkARkWDW9yaW9udHJhbnNmZXIxEjAQBgoJkiaJk/IsZAEZFgJj\nbzESMBAGCgmSJomT8ixkARkWAm56MIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIB\nigKCAYEAomvSopQXQ24+9DBB6I6jxRI2auu3VVb4nOjmmHq7XWM4u3HL+pni63X2\n9qZdoq9xt7H+RPbwL28LDpDNflYQXoOhoVhQ37Pjn9YDjl8/4/9xa9+NUpl9XDIW\nsGkaOY0eqsQm1pEWkHJr3zn/fxoKPZPfaJOglovdxf7dgsHz67Xgd/ka+Wo1YqoE\ne5AUKRwUuvaUaumAKgPH+4E4oiLXI4T1Ff5Q7xxv6yXvHuYtlMHhYfgNn8iiW8WN\nXibYXPNP7NtieSQqwR/xM6IRSoyXKuS+ZNGDPUUGk8RoiV/xvVN4LrVm9upSc0ss\nRZ6qwOQmXCo/lLcDUxJAgG95cPw//sI00tZan75VgsGzSWAOdjQpFM0l4dxvKwHn\ntUeT3ZsAgt0JnGqNm2Bkz81kG4A2hSyFZTFA8vZGhp+hz+8Q573tAR89y9YJBdYM\nzp0FM4zwMNEUwgfRzv1tEVVUEXmoFCyhzonUUw4nE4CFu/sE3ffhjKcXcY//qiSW\nxm4erY3XAgMBAAGjgZowgZcwCQYDVR0TBAIwADALBgNVHQ8EBAMCBLAwHQYDVR0O\nBBYEFO9t7XWuFf2SKLmuijgqR4sGDlRsMC4GA1UdEQQnMCWBI3NhbXVlbC53aWxs\naWFtc0BvcmlvbnRyYW5zZmVyLmNvLm56MC4GA1UdEgQnMCWBI3NhbXVlbC53aWxs\naWFtc0BvcmlvbnRyYW5zZmVyLmNvLm56MA0GCSqGSIb3DQEBCwUAA4IBgQB5sxkE\ncBsSYwK6fYpM+hA5B5yZY2+L0Z+27jF1pWGgbhPH8/FjjBLVn+VFok3CDpRqwXCl\nxCO40JEkKdznNy2avOMra6PFiQyOE74kCtv7P+Fdc+FhgqI5lMon6tt9rNeXmnW/\nc1NaMRdxy999hmRGzUSFjozcCwxpy/LwabxtdXwXgSay4mQ32EDjqR1TixS1+smp\n8C/NCWgpIfzpHGJsjvmH2wAfKtTTqB9CVKLCWEnCHyCaRVuKkrKjqhYCdmMBqCws\nJkxfQWC+jBVeG9ZtPhQgZpfhvh+6hMhraUYRQ6XGyvBqEUe+yo6DKIT3MtGE2+CP\neX9i9ZWBydWb8/rvmwmX2kkcBbX0hZS1rcR593hGc61JR6lvkGYQ2MYskBveyaxt\nQ2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8\nvoD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=\n-----END CERTIFICATE-----\n".freeze] s.date = "2024-05-07" s.extensions = ["ext/nio4r/extconf.rb".freeze] s.files = ["changes.md".freeze, "ext/libev/Changes".freeze, "ext/libev/LICENSE".freeze, "ext/libev/README".freeze, "ext/libev/ev.c".freeze, "ext/libev/ev.h".freeze, "ext/libev/ev_epoll.c".freeze, "ext/libev/ev_iouring.c".freeze, "ext/libev/ev_kqueue.c".freeze, "ext/libev/ev_linuxaio.c".freeze, "ext/libev/ev_poll.c".freeze, "ext/libev/ev_port.c".freeze, "ext/libev/ev_select.c".freeze, "ext/libev/ev_vars.h".freeze, "ext/libev/ev_win32.c".freeze, "ext/libev/ev_wrap.h".freeze, "ext/nio4r/.clang-format".freeze, "ext/nio4r/bytebuffer.c".freeze, "ext/nio4r/extconf.rb".freeze, "ext/nio4r/libev.h".freeze, "ext/nio4r/monitor.c".freeze, "ext/nio4r/nio4r.h".freeze, "ext/nio4r/nio4r_ext.c".freeze, "ext/nio4r/org/nio4r/ByteBuffer.java".freeze, "ext/nio4r/org/nio4r/Monitor.java".freeze, "ext/nio4r/org/nio4r/Nio4r.java".freeze, "ext/nio4r/org/nio4r/Selector.java".freeze, "ext/nio4r/selector.c".freeze, "lib/nio.rb".freeze, "lib/nio/bytebuffer.rb".freeze, "lib/nio/monitor.rb".freeze, "lib/nio/selector.rb".freeze, "lib/nio/version.rb".freeze, "lib/nio4r.rb".freeze, "license.md".freeze, "readme.md".freeze] s.homepage = "https://github.com/socketry/nio4r".freeze s.licenses = ["MIT".freeze, "BSD-2-Clause".freeze] s.required_ruby_version = Gem::Requirement.new(">= 2.4".freeze) s.rubygems_version = "3.3.15".freeze s.summary = "New IO for Ruby".freeze end nio4r-2.7.3/lib/0000755000004100000410000000000014632135320013353 5ustar www-datawww-datanio4r-2.7.3/lib/nio/0000755000004100000410000000000014632135320014140 5ustar www-datawww-datanio4r-2.7.3/lib/nio/monitor.rb0000644000004100000410000000641614632135320016163 0ustar www-datawww-data# frozen_string_literal: true # Released under the MIT License. 
# Copyright, 2011-2018, by Tony Arcieri. # Copyright, 2015, by Upekshe Jayasekera. # Copyright, 2015, by Vladimir Kochnev. # Copyright, 2018-2023, by Samuel Williams. # Copyright, 2019-2020, by Gregory Longtin. module NIO # Monitors watch IO objects for specific events class Monitor attr_reader :io, :interests, :selector attr_accessor :value, :readiness # :nodoc: def initialize(io, interests, selector) unless defined?(::OpenSSL) && io.is_a?(::OpenSSL::SSL::SSLSocket) unless io.is_a?(IO) if IO.respond_to? :try_convert io = IO.try_convert(io) elsif io.respond_to? :to_io io = io.to_io end raise TypeError, "can't convert #{io.class} into IO" unless io.is_a? IO end end @io = io @interests = interests @selector = selector @closed = false end # Replace the existing interest set with a new one # # @param interests [:r, :w, :rw, nil] I/O readiness we're interested in (read/write/readwrite) # # @return [Symbol] new interests def interests=(interests) raise EOFError, "monitor is closed" if closed? raise ArgumentError, "bad interests: #{interests}" unless [:r, :w, :rw, nil].include?(interests) @interests = interests end # Add new interests to the existing interest set # # @param interests [:r, :w, :rw] new I/O interests (read/write/readwrite) # # @return [self] def add_interest(interest) case interest when :r case @interests when :r then @interests = :r when :w then @interests = :rw when :rw then @interests = :rw when nil then @interests = :r end when :w case @interests when :r then @interests = :rw when :w then @interests = :w when :rw then @interests = :rw when nil then @interests = :w end when :rw @interests = :rw else raise ArgumentError, "bad interests: #{interest}" end end # Remove interests from the existing interest set # # @param interests [:r, :w, :rw] I/O interests to remove (read/write/readwrite) # # @return [self] def remove_interest(interest) case interest when :r case @interests when :r then @interests = nil when :w then @interests = :w when :rw then @interests = :w when nil then @interests = nil end when :w case @interests when :r then @interests = :r when :w then @interests = nil when :rw then @interests = :r when nil then @interests = nil end when :rw @interests = nil else raise ArgumentError, "bad interests: #{interest}" end end # Is the IO object readable? def readable? readiness == :r || readiness == :rw end # Is the IO object writable? def writable? readiness == :w || readiness == :rw end alias writeable? writable? # Is this monitor closed? def closed? @closed end # Deactivate this monitor def close(deregister = true) @closed = true @selector.deregister(io) if deregister end end end nio4r-2.7.3/lib/nio/bytebuffer.rb0000644000004100000410000001555414632135320016634 0ustar www-datawww-data# frozen_string_literal: true # Released under the MIT License. # Copyright, 2016, by Upekshe Jayasekera. # Copyright, 2016-2017, by Tony Arcieri. # Copyright, 2020, by Thomas Dziedzic. # Copyright, 2023, by Samuel Williams. 
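# Typical fill/flip/drain cycle (an illustrative sketch; `socket` stands for
# any readable and writable IO object):
#
#   buffer = NIO::ByteBuffer.new(16_384)
#   buffer.read_from(socket) # fill the buffer with whatever is readable now
#   buffer.flip              # switch the buffer from filling to draining
#   buffer.write_to(socket)  # write the buffered bytes back out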
module NIO
  # Efficient byte buffers for performant I/O operations
  class ByteBuffer
    include Enumerable

    attr_reader :position, :limit, :capacity

    # Insufficient capacity in buffer
    OverflowError = Class.new(IOError)

    # Not enough data remaining in buffer
    UnderflowError = Class.new(IOError)

    # Mark has not been set
    MarkUnsetError = Class.new(IOError)

    # Create a new ByteBuffer with the given capacity, zero-filled
    #
    # @param capacity [Integer] size of buffer in bytes
    #
    # @return [NIO::ByteBuffer]
    def initialize(capacity)
      raise TypeError, "no implicit conversion of #{capacity.class} to Integer" unless capacity.is_a?(Integer)

      @capacity = capacity
      clear
    end

    # Clear the buffer, resetting it to the default state
    def clear
      @buffer = ("\0" * @capacity).force_encoding(Encoding::BINARY)
      @position = 0
      @limit = @capacity
      @mark = nil

      self
    end

    # Set the position to the given value. The new position must not exceed the limit.
    # Preserves the mark if it's at or before the new position, otherwise clears it.
    #
    # @param new_position [Integer] position in the buffer
    #
    # @raise [ArgumentError] new position was invalid
    def position=(new_position)
      raise ArgumentError, "negative position given" if new_position < 0
      raise ArgumentError, "specified position exceeds limit" if new_position > @limit

      @mark = nil if @mark && @mark > new_position
      @position = new_position
    end

    # Set the limit to the given value. The new limit must not exceed the capacity.
    # Preserves the position and mark if they're at or before the new limit, otherwise
    # sets the position to the new limit and clears the mark.
    #
    # @param new_limit [Integer] position in the buffer
    #
    # @raise [ArgumentError] new limit was invalid
    def limit=(new_limit)
      raise ArgumentError, "negative limit given" if new_limit < 0
      raise ArgumentError, "specified limit exceeds capacity" if new_limit > @capacity

      @position = new_limit if @position > new_limit
      @mark = nil if @mark && @mark > new_limit
      @limit = new_limit
    end

    # Number of bytes remaining in the buffer before the limit
    #
    # @return [Integer] number of bytes remaining
    def remaining
      @limit - @position
    end

    # Is the buffer full? (i.e. no space remaining before the limit)
    #
    # @return [true, false]
    def full?
      remaining.zero?
    end

    # Obtain the requested number of bytes from the buffer, advancing the position.
    # If no length is given, all remaining bytes are consumed.
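    #
    # @example Read a fixed-size header, then the remaining payload (sketch)
    #   header = buffer.get(8)
    #   payload = buffer.get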
    #
    # @raise [NIO::ByteBuffer::UnderflowError] not enough data remaining in buffer
    #
    # @return [String] bytes read from buffer
    def get(length = remaining)
      raise ArgumentError, "negative length given" if length < 0
      raise UnderflowError, "not enough data in buffer" if length > @limit - @position

      result = @buffer[@position...(@position + length)]
      @position += length
      result
    end

    # Obtain the byte at a given index in the buffer as an Integer
    #
    # @raise [ArgumentError] index is invalid (either negative or larger than limit)
    #
    # @return [Integer] byte at the given index
    def [](index)
      raise ArgumentError, "negative index given" if index < 0
      raise ArgumentError, "specified index exceeds limit" if index >= @limit

      @buffer.getbyte(index)
    end

    # Add a String to the buffer
    #
    # @param str [#to_str] data to add to the buffer
    #
    # @raise [TypeError] given a non-string type
    # @raise [NIO::ByteBuffer::OverflowError] buffer is full
    #
    # @return [self]
    def put(str)
      raise TypeError, "expected String, got #{str.class}" unless str.respond_to?(:to_str)

      str = str.to_str

      raise OverflowError, "buffer is full" if str.length > @limit - @position

      @buffer[@position...(@position + str.length)] = str
      @position += str.length
      self
    end
    alias << put

    # Perform a non-blocking read from the given IO object into the buffer
    # Reads as much data as is immediately available and returns
    #
    # @param [IO] Ruby IO object to read from
    #
    # @return [Integer] number of bytes read (0 if none were available)
    def read_from(io)
      nbytes = @limit - @position
      raise OverflowError, "buffer is full" if nbytes.zero?

      bytes_read = IO.try_convert(io).read_nonblock(nbytes, exception: false)
      return 0 if bytes_read == :wait_readable
      return 0 if bytes_read.nil? # EOF: nothing to add, consistent with the C backend

      self << bytes_read
      bytes_read.length
    end

    # Perform a non-blocking write of the buffer's contents to the given I/O object
    # Writes as much data as is immediately possible and returns
    #
    # @param [IO] Ruby IO object to write to
    #
    # @return [Integer] number of bytes written (0 if the write would block)
    def write_to(io)
      nbytes = @limit - @position
      raise UnderflowError, "no data remaining in buffer" if nbytes.zero?
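      # One non-blocking write attempt: a partial write is normal, and the
      # position below only advances by however many bytes were accepted.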
      bytes_written = IO.try_convert(io).write_nonblock(@buffer[@position...@limit], exception: false)
      return 0 if bytes_written == :wait_writable

      @position += bytes_written
      bytes_written
    end

    # Set the buffer's current position as the limit and set the position to 0
    def flip
      @limit = @position
      @position = 0
      @mark = nil
      self
    end

    # Set the buffer's current position to 0, leaving the limit unchanged
    def rewind
      @position = 0
      @mark = nil
      self
    end

    # Mark a position to return to using the `#reset` method
    def mark
      @mark = @position
      self
    end

    # Reset position to the previously marked location
    #
    # @raise [NIO::ByteBuffer::MarkUnsetError] mark has not been set (call `#mark` first)
    def reset
      raise MarkUnsetError, "mark has not been set" unless @mark

      @position = @mark
      self
    end

    # Move data between the position and limit to the beginning of the buffer
    # Sets the position to the end of the moved data, and the limit to the capacity
    def compact
      @buffer[0...(@limit - @position)] = @buffer[@position...@limit]
      @position = @limit - @position
      @limit = capacity
      self
    end

    # Iterate over the bytes in the buffer (as Integers)
    #
    # @return [self]
    def each(&block)
      @buffer[0...@limit].each_byte(&block)
    end

    # Inspect the state of the buffer
    #
    # @return [String] string describing the state of the buffer
    def inspect
      format(
        "#<%s:0x%x @position=%d @limit=%d @capacity=%d>",
        self.class,
        object_id << 1,
        @position,
        @limit,
        @capacity
      )
    end
  end
end
nio4r-2.7.3/lib/nio/selector.rb0000644000004100000410000001263514632135320016314 0ustar www-datawww-data# frozen_string_literal: true

# Released under the MIT License.
# Copyright, 2011-2017, by Tony Arcieri.
# Copyright, 2012, by Logan Bowers.
# Copyright, 2013, by Sadayuki Furuhashi.
# Copyright, 2013, by Stephen von Takach.
# Copyright, 2013, by Tim Carey-Smith.
# Copyright, 2013, by Ravil Bayramgalin.
# Copyright, 2014, by Sergey Avseyev.
# Copyright, 2014, by John Thornton.
# Copyright, 2015, by Vladimir Kochnev.
# Copyright, 2015, by Upekshe Jayasekera.
# Copyright, 2019-2020, by Gregory Longtin.
# Copyright, 2020-2021, by Joao Fernandes.
# Copyright, 2023, by Samuel Williams.

require "set"

module NIO
  # Selectors monitor IO objects for events of interest
  class Selector
    # Return supported backends as symbols
    #
    # See `#backend` method definition for all possible backends
    def self.backends
      [:ruby]
    end

    # Create a new NIO::Selector
    def initialize(backend = :ruby)
      raise ArgumentError, "unsupported backend: #{backend}" unless [:ruby, nil].include?(backend)

      @selectables = {}
      @lock = Mutex.new

      # Other threads can wake up a selector
      @wakeup, @waker = IO.pipe
      @closed = false
    end

    # Return a symbol representing the backend I/O multiplexing mechanism used.
    # Supported backends are:
    # * :ruby - pure Ruby (i.e. IO.select)
    # * :java - Java NIO on JRuby
    # * :epoll - libev w/ Linux epoll
    # * :poll - libev w/ POSIX poll
    # * :kqueue - libev w/ BSD kqueue
    # * :select - libev w/ SysV select
    # * :port - libev w/ I/O completion ports
    # * :linuxaio - libev w/ Linux AIO io_submit (experimental)
    # * :io_uring - libev w/ Linux io_uring (experimental)
    # * :unknown - libev w/ unknown backend
    def backend
      :ruby
    end

    # Register interest in an IO object with the selector for the given types
    # of events. Valid event types for interest are:
    # * :r - is the IO readable?
    # * :w - is the IO writeable?
    # * :rw - is the IO either readable or writeable?
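    #
    # @example Watch a socket for readability (`socket` here is any IO object)
    #   selector = NIO::Selector.new
    #   monitor = selector.register(socket, :r)
    #   monitor.interests # => :r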
def register(io, interest) unless defined?(::OpenSSL) && io.is_a?(::OpenSSL::SSL::SSLSocket) io = IO.try_convert(io) end @lock.synchronize do raise IOError, "selector is closed" if closed? monitor = @selectables[io] raise ArgumentError, "already registered as #{monitor.interests.inspect}" if monitor monitor = Monitor.new(io, interest, self) @selectables[monitor.io] = monitor monitor end end # Deregister the given IO object from the selector def deregister(io) @lock.synchronize do monitor = @selectables.delete IO.try_convert(io) monitor.close(false) if monitor && !monitor.closed? monitor end end # Is the given IO object registered with the selector? def registered?(io) @lock.synchronize { @selectables.key? io } end # Select which monitors are ready def select(timeout = nil) selected_monitors = Set.new @lock.synchronize do readers = [@wakeup] writers = [] @selectables.each do |io, monitor| readers << io if monitor.interests == :r || monitor.interests == :rw writers << io if monitor.interests == :w || monitor.interests == :rw monitor.readiness = nil end ready_readers, ready_writers = Kernel.select(readers, writers, [], timeout) return unless ready_readers # timeout ready_readers.each do |io| if io == @wakeup # Clear all wakeup signals we've received by reading them # Wakeups should have level triggered behavior @wakeup.read(@wakeup.stat.size) else monitor = @selectables[io] monitor.readiness = :r selected_monitors << monitor end end ready_writers.each do |io| monitor = @selectables[io] monitor.readiness = monitor.readiness == :r ? :rw : :w selected_monitors << monitor end end if block_given? selected_monitors.each { |m| yield m } selected_monitors.size else selected_monitors.to_a end end # Wake up a thread that's in the middle of selecting on this selector, if # any such thread exists. # # Invoking this method more than once between two successive select calls # has the same effect as invoking it just once. In other words, it provides # level-triggered behavior. def wakeup # Send the selector a signal in the form of writing data to a pipe begin @waker.write_nonblock "\0" rescue IO::WaitWritable # This indicates the wakeup pipe is full, which means the other thread # has already received many wakeup calls, but not processed them yet. # The other thread will completely drain this pipe when it wakes up, # so it's ok to ignore this exception if it occurs: we know the other # thread has already been signaled to wake up end nil end # Close this selector and free its resources def close @lock.synchronize do return if @closed begin @wakeup.close rescue IOError end begin @waker.close rescue IOError end @closed = true end end # Is this selector closed? def closed? @closed end def empty? @selectables.empty? end end end nio4r-2.7.3/lib/nio/version.rb0000644000004100000410000000033614632135320016154 0ustar www-datawww-data# frozen_string_literal: true # Released under the MIT License. # Copyright, 2011-2018, by Tony Arcieri. # Copyright, 2018-2024, by Samuel Williams. # Copyright, 2023, by Tsimnuj Hawj. module NIO VERSION = "2.7.3" end nio4r-2.7.3/lib/nio4r.rb0000644000004100000410000000025014632135320014730 0ustar www-datawww-data# frozen_string_literal: true # Released under the MIT License. # Copyright, 2023, by Phillip Aldridge. # Copyright, 2023, by Samuel Williams. require_relative "nio" nio4r-2.7.3/lib/nio.rb0000644000004100000410000000260614632135320014471 0ustar www-datawww-data# frozen_string_literal: true # Released under the MIT License. # Copyright, 2011-2017, by Tony Arcieri. 
# Copyright, 2013, by Stephen von Takach.
# Copyright, 2013, by Per Lundberg.
# Copyright, 2014, by Marek Kowalcze.
# Copyright, 2016, by Upekshe Jayasekera.
# Copyright, 2019-2023, by Samuel Williams.
# Copyright, 2021, by Jun Jiang.

require "socket"
require "nio/version"

# New I/O for Ruby
module NIO
  # NIO implementation, one of the following (as a string):
  # * select: in pure Ruby using Kernel.select
  # * libev: as a C extension using libev
  # * java: using Java NIO
  def self.engine
    ENGINE
  end

  def self.pure?(env = ENV)
    # The user has explicitly opted in to non-native implementation:
    if env["NIO4R_PURE"] == "true"
      return true
    end

    # Native Ruby on Windows is not supported:
    if (Gem.win_platform? && !defined?(JRUBY_VERSION))
      return true
    end

    # M1 native extension is crashing on M1 (arm64):
    # if RUBY_PLATFORM =~ /darwin/ && RUBY_PLATFORM =~ /arm64/
    #   return true
    # end

    return false
  end
end

if NIO.pure?
  require "nio/monitor"
  require "nio/selector"
  require "nio/bytebuffer"

  NIO::ENGINE = "ruby"
else
  require "nio4r_ext"

  if defined?(JRUBY_VERSION)
    require "java"
    require "jruby"
    org.nio4r.Nio4r.new.load(JRuby.runtime, false)
    NIO::ENGINE = "java"
  else
    NIO::ENGINE = "libev"
  end
end
nio4r-2.7.3/readme.md0000644000004100000410000000647714632135320014374 0ustar www-datawww-data# ![nio4r](https://raw.github.com/socketry/nio4r/master/logo.png)

[![Development Status](https://github.com/socketry/nio4r/workflows/Test/badge.svg)](https://github.com/socketry/nio4r/actions?workflow=Test)

**New I/O for Ruby (nio4r)**: cross-platform asynchronous I/O primitives for scalable network clients and servers. Modeled after the Java NIO API, but simplified for ease-of-use.

**nio4r** provides an abstract, cross-platform stateful I/O selector API for Ruby. I/O selectors are the heart of "reactor"-based event loops, and monitor multiple I/O objects for various types of readiness, e.g. ready for reading or writing.
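## Example

A minimal sketch of the selector workflow (illustrative only; see the wiki links below for complete documentation):

    require "nio"

    selector = NIO::Selector.new
    reader, writer = IO.pipe

    monitor = selector.register(reader, :r)
    monitor.value = proc { puts reader.read_nonblock(4096) }

    writer << "hello, world"

    # Blocks until a registered IO is ready, yielding each ready monitor:
    selector.select { |m| m.value.call }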
## Projects using nio4r

- [ActionCable](https://rubygems.org/gems/actioncable): Rails 5 WebSocket protocol, uses nio4r for a WebSocket server
- [Celluloid](https://github.com/celluloid/celluloid-io): Actor-based concurrency framework, uses nio4r for async I/O
- [Async](https://github.com/socketry/async): Asynchronous I/O framework for Ruby
- [Puma](https://github.com/puma/puma): Ruby/Rack web server built for concurrency

## Goals

- Expose high-level interfaces for stateful IO selectors
- Keep the API small to maximize both portability and performance across many different OSes and Ruby VMs
- Provide inherently thread-safe facilities for working with IO objects

## Supported platforms

- Ruby 2.5
- Ruby 2.6
- Ruby 2.7
- Ruby 3.0
- [JRuby](https://github.com/jruby/jruby)
- [TruffleRuby](https://github.com/oracle/truffleruby)

## Supported backends

- **libev**: MRI C extension targeting multiple native IO selector APIs (e.g. epoll, kqueue)
- **Java NIO**: JRuby extension which wraps the Java NIO subsystem
- **Pure Ruby**: `Kernel.select`-based backend that should work on any Ruby interpreter

## Documentation

[Please see the nio4r wiki](https://github.com/socketry/nio4r/wiki) for more detailed documentation and usage notes:

- [Getting Started](https://github.com/socketry/nio4r/wiki/Getting-Started): Introduction to nio4r's components
- [Selectors](https://github.com/socketry/nio4r/wiki/Selectors): monitor multiple `IO` objects for readiness events
- [Monitors](https://github.com/socketry/nio4r/wiki/Monitors): control interests and inspect readiness for specific `IO` objects
- [Byte Buffers](https://github.com/socketry/nio4r/wiki/Byte-Buffers): fixed-size native buffers for high-performance I/O

See also:

- [YARD API documentation](http://www.rubydoc.info/gems/nio4r/frames)

## Non-goals

**nio4r** is not a full-featured event framework like [EventMachine](https://github.com/eventmachine/eventmachine) or [Cool.io](https://coolio.github.io/). Instead, nio4r is the sort of thing you might write a library like that on top of.

nio4r provides a minimal API such that individual Ruby implementers may choose to produce optimized versions for their platform, without having to maintain a large codebase.

## Releases

Bump the version first:

    bundle exec bake gem:release:version:patch

### CRuby

    rake clean
    rake release

### JRuby

You might need to delete `Gemfile.lock` before trying to `bundle install`.

    # Ensure you have the correct JDK:
    pacman -Syu jdk-openjdk
    archlinux-java set java-19-openjdk

    # Ensure you are using jruby:
    chruby jruby
    bundle update

    # Build the package:
    rake clean
    rake compile
    rake release
nio4r-2.7.3/license.md0000644000004100000410000000626714632135320014564 0ustar www-datawww-data# MIT License

Copyright, 2011-2020, by Tony Arcieri.
Copyright, 2012, by Bernd Ahlers.
Copyright, 2012, by Logan Bowers.
Copyright, 2012, by Dirkjan Bussink.
Copyright, 2013, by Sadayuki Furuhashi.
Copyright, 2013, by Shannon Skipper.
Copyright, 2013, by Stephen von Takach.
Copyright, 2013, by Tim Carey-Smith.
Copyright, 2013, by Per Lundberg.
Copyright, 2013, by Ravil Bayramgalin.
Copyright, 2013, by Luis Lavena.
Copyright, 2014, by Anatol Pomozov.
Copyright, 2014, by Hiroshi Shibata.
Copyright, 2014, by Marek Kowalcze.
Copyright, 2014, by Sergey Avseyev.
Copyright, 2014, by John Thornton. Copyright, 2015-2017, by Tiago Cardoso. Copyright, 2015, by Daniel Berger. Copyright, 2015-2016, by Upekshe Jayasekera. Copyright, 2015, by Vladimir Kochnev. Copyright, 2016-2018, by Jun Aruga. Copyright, 2016, by Omer Katz. Copyright, 2016, by Denis Washington. Copyright, 2016-2021, by Olle Jonsson. Copyright, 2017, by Tao Luo. Copyright, 2017, by Usaku Nakamura. Copyright, 2017-2022, by Gregory Longtin. Copyright, 2017, by Lars Kanis. Copyright, 2017, by Tomoya Ishida. Copyright, 2018-2024, by Samuel Williams. Copyright, 2019, by Cédric Boutillier. Copyright, 2019-2020, by Benoit Daloze. Copyright, 2019, by Jesús Burgos Maciá. Copyright, 2019, by Thomas Kuntz. Copyright, 2019, by Orien Madgwick. Copyright, 2019, by Zhang Kang. Copyright, 2020, by Thomas Dziedzic. Copyright, 2020, by Elad Eyal. Copyright, 2020, by Pedro Paiva. Copyright, 2020, by Boaz Segev. Copyright, 2020, by Charles Oliver Nutter. Copyright, 2020-2021, by Joao Fernandes. Copyright, 2021, by Jun Jiang. Copyright, 2021, by Pavel Lobashov. Copyright, 2021, by Jeffrey Martin. Copyright, 2023-2024, by Pavel Rosický. Copyright, 2023, by Tsimnuj Hawj. Copyright, 2023, by Phillip Aldridge. Copyright, 2023, by Maxime Demolin. Copyright, 2023-2024, by Vít Ondruch. Copyright, 2023, by Jean Boussier. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ## libev Released under the BSD-2-Clause OR GPL-2.0-or-later license. See [ext/libev/LICENSE] for details. Copyright, 2007-2019, by Marc Alexander Lehmann. [ext/libev/LICENSE]: https://github.com/socketry/nio4r/blob/master/ext/libev/LICENSE nio4r-2.7.3/ext/0000755000004100000410000000000014632135320013405 5ustar www-datawww-datanio4r-2.7.3/ext/nio4r/0000755000004100000410000000000014632135320014440 5ustar www-datawww-datanio4r-2.7.3/ext/nio4r/monitor.c0000644000004100000410000002561014632135320016277 0ustar www-datawww-data/* * Copyright (c) 2011 Tony Arcieri. Distributed under the MIT License. See * LICENSE.txt for further details. 
*/ #include "nio4r.h" #include static VALUE mNIO = Qnil; static VALUE cNIO_Monitor = Qnil; /* Allocator/deallocator */ static VALUE NIO_Monitor_allocate(VALUE klass); static void NIO_Monitor_mark(void *data); static size_t NIO_Monitor_memsize(const void *data); /* Methods */ static VALUE NIO_Monitor_initialize(VALUE self, VALUE selector, VALUE io, VALUE interests); static VALUE NIO_Monitor_close(int argc, VALUE *argv, VALUE self); static VALUE NIO_Monitor_is_closed(VALUE self); static VALUE NIO_Monitor_io(VALUE self); static VALUE NIO_Monitor_interests(VALUE self); static VALUE NIO_Monitor_set_interests(VALUE self, VALUE interests); static VALUE NIO_Monitor_add_interest(VALUE self, VALUE interest); static VALUE NIO_Monitor_remove_interest(VALUE self, VALUE interest); static VALUE NIO_Monitor_selector(VALUE self); static VALUE NIO_Monitor_is_readable(VALUE self); static VALUE NIO_Monitor_is_writable(VALUE self); static VALUE NIO_Monitor_value(VALUE self); static VALUE NIO_Monitor_set_value(VALUE self, VALUE obj); static VALUE NIO_Monitor_readiness(VALUE self); /* Internal C functions */ static int NIO_Monitor_symbol2interest(VALUE interests); static void NIO_Monitor_update_interests(VALUE self, int interests); /* Compatibility for Ruby <= 3.1 */ #ifndef HAVE_RB_IO_DESCRIPTOR static int io_descriptor_fallback(VALUE io) { rb_io_t *fptr; GetOpenFile(io, fptr); return fptr->fd; } #define rb_io_descriptor io_descriptor_fallback #endif /* Monitor control how a channel is being waited for by a monitor */ void Init_NIO_Monitor() { mNIO = rb_define_module("NIO"); cNIO_Monitor = rb_define_class_under(mNIO, "Monitor", rb_cObject); rb_define_alloc_func(cNIO_Monitor, NIO_Monitor_allocate); rb_define_method(cNIO_Monitor, "initialize", NIO_Monitor_initialize, 3); rb_define_method(cNIO_Monitor, "close", NIO_Monitor_close, -1); rb_define_method(cNIO_Monitor, "closed?", NIO_Monitor_is_closed, 0); rb_define_method(cNIO_Monitor, "io", NIO_Monitor_io, 0); rb_define_method(cNIO_Monitor, "interests", NIO_Monitor_interests, 0); rb_define_method(cNIO_Monitor, "interests=", NIO_Monitor_set_interests, 1); rb_define_method(cNIO_Monitor, "add_interest", NIO_Monitor_add_interest, 1); rb_define_method(cNIO_Monitor, "remove_interest", NIO_Monitor_remove_interest, 1); rb_define_method(cNIO_Monitor, "selector", NIO_Monitor_selector, 0); rb_define_method(cNIO_Monitor, "value", NIO_Monitor_value, 0); rb_define_method(cNIO_Monitor, "value=", NIO_Monitor_set_value, 1); rb_define_method(cNIO_Monitor, "readiness", NIO_Monitor_readiness, 0); rb_define_method(cNIO_Monitor, "readable?", NIO_Monitor_is_readable, 0); rb_define_method(cNIO_Monitor, "writable?", NIO_Monitor_is_writable, 0); rb_define_method(cNIO_Monitor, "writeable?", NIO_Monitor_is_writable, 0); } static const rb_data_type_t NIO_Monitor_type = { "NIO::Monitor", { NIO_Monitor_mark, RUBY_TYPED_DEFAULT_FREE, NIO_Monitor_memsize, }, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED }; static VALUE NIO_Monitor_allocate(VALUE klass) { struct NIO_Monitor *monitor = (struct NIO_Monitor *)xmalloc(sizeof(struct NIO_Monitor)); assert(monitor); *monitor = (struct NIO_Monitor){.self = Qnil}; return TypedData_Wrap_Struct(klass, &NIO_Monitor_type, monitor); } static void NIO_Monitor_mark(void *data) { struct NIO_Monitor *monitor = (struct NIO_Monitor *)data; rb_gc_mark(monitor->self); } static size_t NIO_Monitor_memsize(const void *data) { const struct NIO_Monitor *monitor = (const struct NIO_Monitor *)data; return sizeof(*monitor); } static VALUE 
NIO_Monitor_initialize(VALUE self, VALUE io, VALUE interests, VALUE selector_obj) { struct NIO_Monitor *monitor; struct NIO_Selector *selector; ID interests_id; interests_id = SYM2ID(interests); TypedData_Get_Struct(self, struct NIO_Monitor, &NIO_Monitor_type, monitor); if (interests_id == rb_intern("r")) { monitor->interests = EV_READ; } else if (interests_id == rb_intern("w")) { monitor->interests = EV_WRITE; } else if (interests_id == rb_intern("rw")) { monitor->interests = EV_READ | EV_WRITE; } else { rb_raise(rb_eArgError, "invalid event type %s (must be :r, :w, or :rw)", RSTRING_PTR(rb_funcall(interests, rb_intern("inspect"), 0))); } int descriptor = rb_io_descriptor(rb_convert_type(io, T_FILE, "IO", "to_io")); ev_io_init(&monitor->ev_io, NIO_Selector_monitor_callback, descriptor, monitor->interests); rb_ivar_set(self, rb_intern("io"), io); rb_ivar_set(self, rb_intern("interests"), interests); rb_ivar_set(self, rb_intern("selector"), selector_obj); selector = NIO_Selector_unwrap(selector_obj); RB_OBJ_WRITE(self, &monitor->self, self); monitor->ev_io.data = (void *)monitor; /* We can safely hang onto this as we also hang onto a reference to the object where it originally came from */ monitor->selector = selector; if (monitor->interests) { ev_io_start(selector->ev_loop, &monitor->ev_io); } return Qnil; } static VALUE NIO_Monitor_close(int argc, VALUE *argv, VALUE self) { VALUE deregister, selector; struct NIO_Monitor *monitor; TypedData_Get_Struct(self, struct NIO_Monitor, &NIO_Monitor_type, monitor); rb_scan_args(argc, argv, "01", &deregister); selector = rb_ivar_get(self, rb_intern("selector")); if (selector != Qnil) { /* if ev_loop is 0, it means that the loop has been stopped already (see NIO_Selector_shutdown) */ if (monitor->interests && monitor->selector->ev_loop) { ev_io_stop(monitor->selector->ev_loop, &monitor->ev_io); } monitor->selector = 0; rb_ivar_set(self, rb_intern("selector"), Qnil); /* Default value is true */ if (deregister == Qtrue || deregister == Qnil) { rb_funcall(selector, rb_intern("deregister"), 1, rb_ivar_get(self, rb_intern("io"))); } } return Qnil; } static VALUE NIO_Monitor_is_closed(VALUE self) { struct NIO_Monitor *monitor; TypedData_Get_Struct(self, struct NIO_Monitor, &NIO_Monitor_type, monitor); return monitor->selector == 0 ? 
Qtrue : Qfalse; } static VALUE NIO_Monitor_io(VALUE self) { return rb_ivar_get(self, rb_intern("io")); } static VALUE NIO_Monitor_interests(VALUE self) { return rb_ivar_get(self, rb_intern("interests")); } static VALUE NIO_Monitor_set_interests(VALUE self, VALUE interests) { if (NIL_P(interests)) { NIO_Monitor_update_interests(self, 0); } else { NIO_Monitor_update_interests(self, NIO_Monitor_symbol2interest(interests)); } return rb_ivar_get(self, rb_intern("interests")); } static VALUE NIO_Monitor_add_interest(VALUE self, VALUE interest) { struct NIO_Monitor *monitor; TypedData_Get_Struct(self, struct NIO_Monitor, &NIO_Monitor_type, monitor); interest = monitor->interests | NIO_Monitor_symbol2interest(interest); NIO_Monitor_update_interests(self, (int)interest); return rb_ivar_get(self, rb_intern("interests")); } static VALUE NIO_Monitor_remove_interest(VALUE self, VALUE interest) { struct NIO_Monitor *monitor; TypedData_Get_Struct(self, struct NIO_Monitor, &NIO_Monitor_type, monitor); interest = monitor->interests & ~NIO_Monitor_symbol2interest(interest); NIO_Monitor_update_interests(self, (int)interest); return rb_ivar_get(self, rb_intern("interests")); } static VALUE NIO_Monitor_selector(VALUE self) { return rb_ivar_get(self, rb_intern("selector")); } static VALUE NIO_Monitor_value(VALUE self) { return rb_ivar_get(self, rb_intern("value")); } static VALUE NIO_Monitor_set_value(VALUE self, VALUE obj) { return rb_ivar_set(self, rb_intern("value"), obj); } static VALUE NIO_Monitor_readiness(VALUE self) { struct NIO_Monitor *monitor; TypedData_Get_Struct(self, struct NIO_Monitor, &NIO_Monitor_type, monitor); if ((monitor->revents & (EV_READ | EV_WRITE)) == (EV_READ | EV_WRITE)) { return ID2SYM(rb_intern("rw")); } else if (monitor->revents & EV_READ) { return ID2SYM(rb_intern("r")); } else if (monitor->revents & EV_WRITE) { return ID2SYM(rb_intern("w")); } else { return Qnil; } } static VALUE NIO_Monitor_is_readable(VALUE self) { struct NIO_Monitor *monitor; TypedData_Get_Struct(self, struct NIO_Monitor, &NIO_Monitor_type, monitor); if (monitor->revents & EV_READ) { return Qtrue; } else { return Qfalse; } } static VALUE NIO_Monitor_is_writable(VALUE self) { struct NIO_Monitor *monitor; TypedData_Get_Struct(self, struct NIO_Monitor, &NIO_Monitor_type, monitor); if (monitor->revents & EV_WRITE) { return Qtrue; } else { return Qfalse; } } /* Internal C functions */ static int NIO_Monitor_symbol2interest(VALUE interests) { ID interests_id; interests_id = SYM2ID(interests); if (interests_id == rb_intern("r")) { return EV_READ; } else if (interests_id == rb_intern("w")) { return EV_WRITE; } else if (interests_id == rb_intern("rw")) { return EV_READ | EV_WRITE; } else { rb_raise(rb_eArgError, "invalid interest type %s (must be :r, :w, or :rw)", RSTRING_PTR(rb_funcall(interests, rb_intern("inspect"), 0))); } } static void NIO_Monitor_update_interests(VALUE self, int interests) { ID interests_id; struct NIO_Monitor *monitor; TypedData_Get_Struct(self, struct NIO_Monitor, &NIO_Monitor_type, monitor); if (NIO_Monitor_is_closed(self) == Qtrue) { rb_raise(rb_eEOFError, "monitor is closed"); } if (interests) { switch (interests) { case EV_READ: interests_id = rb_intern("r"); break; case EV_WRITE: interests_id = rb_intern("w"); break; case EV_READ | EV_WRITE: interests_id = rb_intern("rw"); break; default: rb_raise(rb_eRuntimeError, "bogus NIO_Monitor_update_interests! 
(%d)", interests); } rb_ivar_set(self, rb_intern("interests"), ID2SYM(interests_id)); } else { rb_ivar_set(self, rb_intern("interests"), Qnil); } if (monitor->interests != interests) { // If the monitor currently has interests, we should stop it. if (monitor->interests) { ev_io_stop(monitor->selector->ev_loop, &monitor->ev_io); } // Assign the interests we are now monitoring for: monitor->interests = interests; ev_io_set(&monitor->ev_io, monitor->ev_io.fd, monitor->interests); // If we are interested in events, schedule the monitor back into the event loop: if (monitor->interests) { ev_io_start(monitor->selector->ev_loop, &monitor->ev_io); } } } nio4r-2.7.3/ext/nio4r/bytebuffer.c0000644000004100000410000003262114632135320016745 0ustar www-datawww-data#include "nio4r.h" static VALUE mNIO = Qnil; static VALUE cNIO_ByteBuffer = Qnil; static VALUE cNIO_ByteBuffer_OverflowError = Qnil; static VALUE cNIO_ByteBuffer_UnderflowError = Qnil; static VALUE cNIO_ByteBuffer_MarkUnsetError = Qnil; /* Allocator/deallocator */ static VALUE NIO_ByteBuffer_allocate(VALUE klass); static void NIO_ByteBuffer_free(void *data); static size_t NIO_ByteBuffer_memsize(const void *data); /* Methods */ static VALUE NIO_ByteBuffer_initialize(VALUE self, VALUE capacity); static VALUE NIO_ByteBuffer_clear(VALUE self); static VALUE NIO_ByteBuffer_get_position(VALUE self); static VALUE NIO_ByteBuffer_set_position(VALUE self, VALUE new_position); static VALUE NIO_ByteBuffer_get_limit(VALUE self); static VALUE NIO_ByteBuffer_set_limit(VALUE self, VALUE new_limit); static VALUE NIO_ByteBuffer_capacity(VALUE self); static VALUE NIO_ByteBuffer_remaining(VALUE self); static VALUE NIO_ByteBuffer_full(VALUE self); static VALUE NIO_ByteBuffer_get(int argc, VALUE *argv, VALUE self); static VALUE NIO_ByteBuffer_fetch(VALUE self, VALUE index); static VALUE NIO_ByteBuffer_put(VALUE self, VALUE string); static VALUE NIO_ByteBuffer_write_to(VALUE self, VALUE file); static VALUE NIO_ByteBuffer_read_from(VALUE self, VALUE file); static VALUE NIO_ByteBuffer_flip(VALUE self); static VALUE NIO_ByteBuffer_rewind(VALUE self); static VALUE NIO_ByteBuffer_mark(VALUE self); static VALUE NIO_ByteBuffer_reset(VALUE self); static VALUE NIO_ByteBuffer_compact(VALUE self); static VALUE NIO_ByteBuffer_each(VALUE self); static VALUE NIO_ByteBuffer_inspect(VALUE self); #define MARK_UNSET -1 /* Compatibility for Ruby <= 3.1 */ #ifndef HAVE_RB_IO_DESCRIPTOR static int io_descriptor_fallback(VALUE io) { rb_io_t *fptr; GetOpenFile(io, fptr); return fptr->fd; } #define rb_io_descriptor io_descriptor_fallback #endif static void io_set_nonblock(VALUE io) { rb_io_t *fptr; GetOpenFile(io, fptr); rb_io_set_nonblock(fptr); } void Init_NIO_ByteBuffer() { mNIO = rb_define_module("NIO"); cNIO_ByteBuffer = rb_define_class_under(mNIO, "ByteBuffer", rb_cObject); rb_define_alloc_func(cNIO_ByteBuffer, NIO_ByteBuffer_allocate); cNIO_ByteBuffer_OverflowError = rb_define_class_under(cNIO_ByteBuffer, "OverflowError", rb_eIOError); cNIO_ByteBuffer_UnderflowError = rb_define_class_under(cNIO_ByteBuffer, "UnderflowError", rb_eIOError); cNIO_ByteBuffer_MarkUnsetError = rb_define_class_under(cNIO_ByteBuffer, "MarkUnsetError", rb_eIOError); rb_include_module(cNIO_ByteBuffer, rb_mEnumerable); rb_define_method(cNIO_ByteBuffer, "initialize", NIO_ByteBuffer_initialize, 1); rb_define_method(cNIO_ByteBuffer, "clear", NIO_ByteBuffer_clear, 0); rb_define_method(cNIO_ByteBuffer, "position", NIO_ByteBuffer_get_position, 0); rb_define_method(cNIO_ByteBuffer, "position=", 
NIO_ByteBuffer_set_position, 1); rb_define_method(cNIO_ByteBuffer, "limit", NIO_ByteBuffer_get_limit, 0); rb_define_method(cNIO_ByteBuffer, "limit=", NIO_ByteBuffer_set_limit, 1); rb_define_method(cNIO_ByteBuffer, "capacity", NIO_ByteBuffer_capacity, 0); rb_define_method(cNIO_ByteBuffer, "size", NIO_ByteBuffer_capacity, 0); rb_define_method(cNIO_ByteBuffer, "remaining", NIO_ByteBuffer_remaining, 0); rb_define_method(cNIO_ByteBuffer, "full?", NIO_ByteBuffer_full, 0); rb_define_method(cNIO_ByteBuffer, "get", NIO_ByteBuffer_get, -1); rb_define_method(cNIO_ByteBuffer, "[]", NIO_ByteBuffer_fetch, 1); rb_define_method(cNIO_ByteBuffer, "<<", NIO_ByteBuffer_put, 1); rb_define_method(cNIO_ByteBuffer, "read_from", NIO_ByteBuffer_read_from, 1); rb_define_method(cNIO_ByteBuffer, "write_to", NIO_ByteBuffer_write_to, 1); rb_define_method(cNIO_ByteBuffer, "flip", NIO_ByteBuffer_flip, 0); rb_define_method(cNIO_ByteBuffer, "rewind", NIO_ByteBuffer_rewind, 0); rb_define_method(cNIO_ByteBuffer, "mark", NIO_ByteBuffer_mark, 0); rb_define_method(cNIO_ByteBuffer, "reset", NIO_ByteBuffer_reset, 0); rb_define_method(cNIO_ByteBuffer, "compact", NIO_ByteBuffer_compact, 0); rb_define_method(cNIO_ByteBuffer, "each", NIO_ByteBuffer_each, 0); rb_define_method(cNIO_ByteBuffer, "inspect", NIO_ByteBuffer_inspect, 0); } static const rb_data_type_t NIO_ByteBuffer_type = { "NIO::ByteBuffer", { NULL, // Nothing to mark NIO_ByteBuffer_free, NIO_ByteBuffer_memsize, }, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED }; static VALUE NIO_ByteBuffer_allocate(VALUE klass) { struct NIO_ByteBuffer *bytebuffer = (struct NIO_ByteBuffer *)xmalloc(sizeof(struct NIO_ByteBuffer)); bytebuffer->buffer = NULL; return TypedData_Wrap_Struct(klass, &NIO_ByteBuffer_type, bytebuffer); } static void NIO_ByteBuffer_free(void *data) { struct NIO_ByteBuffer *buffer = (struct NIO_ByteBuffer *)data; if (buffer->buffer) xfree(buffer->buffer); xfree(buffer); } static size_t NIO_ByteBuffer_memsize(const void *data) { const struct NIO_ByteBuffer *buffer = (const struct NIO_ByteBuffer *)data; size_t memsize = sizeof(struct NIO_ByteBuffer); if (buffer->buffer) memsize += buffer->capacity; return memsize; } static VALUE NIO_ByteBuffer_initialize(VALUE self, VALUE capacity) { struct NIO_ByteBuffer *buffer; TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); buffer->capacity = NUM2INT(capacity); buffer->buffer = xmalloc(buffer->capacity); NIO_ByteBuffer_clear(self); return self; } static VALUE NIO_ByteBuffer_clear(VALUE self) { struct NIO_ByteBuffer *buffer; TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); memset(buffer->buffer, 0, buffer->capacity); buffer->position = 0; buffer->limit = buffer->capacity; buffer->mark = MARK_UNSET; return self; } static VALUE NIO_ByteBuffer_get_position(VALUE self) { struct NIO_ByteBuffer *buffer; TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); return INT2NUM(buffer->position); } static VALUE NIO_ByteBuffer_set_position(VALUE self, VALUE new_position) { int pos; struct NIO_ByteBuffer *buffer; TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); pos = NUM2INT(new_position); if (pos < 0) { rb_raise(rb_eArgError, "negative position given"); } if (pos > buffer->limit) { rb_raise(rb_eArgError, "specified position exceeds limit"); } buffer->position = pos; if (buffer->mark > buffer->position) { buffer->mark = MARK_UNSET; } return new_position; } static VALUE NIO_ByteBuffer_get_limit(VALUE self) { struct 
NIO_ByteBuffer *buffer;
    TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer);

    return INT2NUM(buffer->limit);
}

static VALUE NIO_ByteBuffer_set_limit(VALUE self, VALUE new_limit)
{
    int lim;
    struct NIO_ByteBuffer *buffer;
    TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer);

    lim = NUM2INT(new_limit);

    if (lim < 0) {
        rb_raise(rb_eArgError, "negative limit given");
    }

    if (lim > buffer->capacity) {
        rb_raise(rb_eArgError, "specified limit exceeds capacity");
    }

    buffer->limit = lim;

    if (buffer->position > lim) {
        buffer->position = lim;
    }

    if (buffer->mark > lim) {
        buffer->mark = MARK_UNSET;
    }

    return new_limit;
}

static VALUE NIO_ByteBuffer_capacity(VALUE self)
{
    struct NIO_ByteBuffer *buffer;
    TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer);

    return INT2NUM(buffer->capacity);
}

static VALUE NIO_ByteBuffer_remaining(VALUE self)
{
    struct NIO_ByteBuffer *buffer;
    TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer);

    return INT2NUM(buffer->limit - buffer->position);
}

static VALUE NIO_ByteBuffer_full(VALUE self)
{
    struct NIO_ByteBuffer *buffer;
    TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer);

    return buffer->position == buffer->limit ? Qtrue : Qfalse;
}

static VALUE NIO_ByteBuffer_get(int argc, VALUE *argv, VALUE self)
{
    int len;
    VALUE length, result;
    struct NIO_ByteBuffer *buffer;
    TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer);

    rb_scan_args(argc, argv, "01", &length);

    if (length == Qnil) {
        len = buffer->limit - buffer->position;
    } else {
        len = NUM2INT(length);
    }

    if (len < 0) {
        rb_raise(rb_eArgError, "negative length given");
    }

    if (len > buffer->limit - buffer->position) {
        rb_raise(cNIO_ByteBuffer_UnderflowError, "not enough data in buffer");
    }

    result = rb_str_new(buffer->buffer + buffer->position, len);
    buffer->position += len;

    return result;
}

static VALUE NIO_ByteBuffer_fetch(VALUE self, VALUE index)
{
    int i;
    struct NIO_ByteBuffer *buffer;
    TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer);

    i = NUM2INT(index);

    if (i < 0) {
        rb_raise(rb_eArgError, "negative index given");
    }

    if (i >= buffer->limit) {
        rb_raise(rb_eArgError, "specified index exceeds limit");
    }

    /* Cast through unsigned char so bytes >= 0x80 come back as 128..255 */
    return INT2NUM((unsigned char)buffer->buffer[i]);
}

static VALUE NIO_ByteBuffer_put(VALUE self, VALUE string)
{
    long length;
    struct NIO_ByteBuffer *buffer;
    TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer);

    StringValue(string);
    length = RSTRING_LEN(string);

    if (length > buffer->limit - buffer->position) {
        rb_raise(cNIO_ByteBuffer_OverflowError, "buffer is full");
    }

    memcpy(buffer->buffer + buffer->position, StringValuePtr(string), length);
    buffer->position += length;

    return self;
}

static VALUE NIO_ByteBuffer_read_from(VALUE self, VALUE io)
{
    struct NIO_ByteBuffer *buffer;
    ssize_t nbytes, bytes_read;
    TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer);

    io = rb_convert_type(io, T_FILE, "IO", "to_io");
    io_set_nonblock(io);

    nbytes = buffer->limit - buffer->position;
    if (nbytes == 0) {
        rb_raise(cNIO_ByteBuffer_OverflowError, "buffer is full");
    }

    bytes_read = read(rb_io_descriptor(io), buffer->buffer + buffer->position, nbytes);

    if (bytes_read < 0) {
        if (errno == EAGAIN) {
            return INT2NUM(0);
        } else {
            rb_sys_fail("read");
        }
    }

    buffer->position += bytes_read;
    return SIZET2NUM(bytes_read);
}

static VALUE NIO_ByteBuffer_write_to(VALUE self, VALUE io)
{
    struct NIO_ByteBuffer *buffer;
    ssize_t nbytes, bytes_written;
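    /* Drain the bytes between position and limit with a single non-blocking
       write(2); a short write just advances the position by what was accepted. */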
TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); io = rb_convert_type(io, T_FILE, "IO", "to_io"); io_set_nonblock(io); nbytes = buffer->limit - buffer->position; if (nbytes == 0) { rb_raise(cNIO_ByteBuffer_UnderflowError, "no data remaining in buffer"); } bytes_written = write(rb_io_descriptor(io), buffer->buffer + buffer->position, nbytes); if (bytes_written < 0) { if (errno == EAGAIN) { return INT2NUM(0); } else { rb_sys_fail("write"); } } buffer->position += bytes_written; return SIZET2NUM(bytes_written); } static VALUE NIO_ByteBuffer_flip(VALUE self) { struct NIO_ByteBuffer *buffer; TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); buffer->limit = buffer->position; buffer->position = 0; buffer->mark = MARK_UNSET; return self; } static VALUE NIO_ByteBuffer_rewind(VALUE self) { struct NIO_ByteBuffer *buffer; TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); buffer->position = 0; buffer->mark = MARK_UNSET; return self; } static VALUE NIO_ByteBuffer_mark(VALUE self) { struct NIO_ByteBuffer *buffer; TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); buffer->mark = buffer->position; return self; } static VALUE NIO_ByteBuffer_reset(VALUE self) { struct NIO_ByteBuffer *buffer; TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); if (buffer->mark < 0) { rb_raise(cNIO_ByteBuffer_MarkUnsetError, "mark has not been set"); } else { buffer->position = buffer->mark; } return self; } static VALUE NIO_ByteBuffer_compact(VALUE self) { struct NIO_ByteBuffer *buffer; TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); memmove(buffer->buffer, buffer->buffer + buffer->position, buffer->limit - buffer->position); buffer->position = buffer->limit - buffer->position; buffer->limit = buffer->capacity; return self; } static VALUE NIO_ByteBuffer_each(VALUE self) { int i; struct NIO_ByteBuffer *buffer; TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); if (rb_block_given_p()) { for (i = 0; i < buffer->limit; i++) { rb_yield(INT2NUM(buffer->buffer[i])); } } else { rb_raise(rb_eArgError, "no block given"); } return self; } static VALUE NIO_ByteBuffer_inspect(VALUE self) { struct NIO_ByteBuffer *buffer; TypedData_Get_Struct(self, struct NIO_ByteBuffer, &NIO_ByteBuffer_type, buffer); return rb_sprintf( "#<%s:%p @position=%d @limit=%d @capacity=%d>", rb_class2name(CLASS_OF(self)), (void *)self, buffer->position, buffer->limit, buffer->capacity); } nio4r-2.7.3/ext/nio4r/.clang-format0000644000004100000410000000057014632135320017015 0ustar www-datawww-data--- Language: Cpp BasedOnStyle: WebKit AllowAllParametersOfDeclarationOnNextLine: false BinPackArguments: false BinPackParameters: false AlignConsecutiveMacros: false AlignConsecutiveAssignments: false BreakBeforeBraces: Linux BraceWrapping: AfterControlStatement: Never IndentCaseLabels: true PointerAlignment: Right SpaceBeforeParens: ControlStatements IndentWidth: 4 ... nio4r-2.7.3/ext/nio4r/selector.c0000644000004100000410000004476114632135320016440 0ustar www-datawww-data/* * Copyright (c) 2011 Tony Arcieri. Distributed under the MIT License. See * LICENSE.txt for further details. 
*/ #include "nio4r.h" #ifdef HAVE_RUBYSIG_H #include "rubysig.h" #endif #ifdef HAVE_UNISTD_H #include #else #include #endif #include #include static VALUE mNIO = Qnil; static VALUE cNIO_Monitor = Qnil; static VALUE cNIO_Selector = Qnil; /* Allocator/deallocator */ static VALUE NIO_Selector_allocate(VALUE klass); static void NIO_Selector_mark(void *data); static void NIO_Selector_shutdown(struct NIO_Selector *selector); static void NIO_Selector_free(void *data); static size_t NIO_Selector_memsize(const void *data); /* Class methods */ static VALUE NIO_Selector_supported_backends(VALUE klass); /* Instance methods */ static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self); static VALUE NIO_Selector_backend(VALUE self); static VALUE NIO_Selector_register(VALUE self, VALUE selectable, VALUE interest); static VALUE NIO_Selector_deregister(VALUE self, VALUE io); static VALUE NIO_Selector_is_registered(VALUE self, VALUE io); static VALUE NIO_Selector_select(int argc, VALUE *argv, VALUE self); static VALUE NIO_Selector_wakeup(VALUE self); static VALUE NIO_Selector_close(VALUE self); static VALUE NIO_Selector_closed(VALUE self); static VALUE NIO_Selector_is_empty(VALUE self); /* Internal functions */ static VALUE NIO_Selector_synchronize(VALUE self, VALUE (*func)(VALUE arg), VALUE arg); static VALUE NIO_Selector_unlock(VALUE lock); static VALUE NIO_Selector_register_synchronized(VALUE arg); static VALUE NIO_Selector_deregister_synchronized(VALUE arg); static VALUE NIO_Selector_select_synchronized(VALUE arg); static VALUE NIO_Selector_close_synchronized(VALUE arg); static VALUE NIO_Selector_closed_synchronized(VALUE arg); static int NIO_Selector_run(struct NIO_Selector *selector, VALUE timeout); static void NIO_Selector_timeout_callback(struct ev_loop *ev_loop, struct ev_timer *timer, int revents); static void NIO_Selector_wakeup_callback(struct ev_loop *ev_loop, struct ev_io *io, int revents); /* Default number of slots in the buffer for selected monitors */ #define INITIAL_READY_BUFFER 32 /* Ruby 1.8 needs us to busy wait and run the green threads scheduler every 10ms */ #define BUSYWAIT_INTERVAL 0.01 /* Selectors wait for events */ void Init_NIO_Selector(void) { mNIO = rb_define_module("NIO"); cNIO_Selector = rb_define_class_under(mNIO, "Selector", rb_cObject); rb_define_alloc_func(cNIO_Selector, NIO_Selector_allocate); rb_define_singleton_method(cNIO_Selector, "backends", NIO_Selector_supported_backends, 0); rb_define_method(cNIO_Selector, "initialize", NIO_Selector_initialize, -1); rb_define_method(cNIO_Selector, "backend", NIO_Selector_backend, 0); rb_define_method(cNIO_Selector, "register", NIO_Selector_register, 2); rb_define_method(cNIO_Selector, "deregister", NIO_Selector_deregister, 1); rb_define_method(cNIO_Selector, "registered?", NIO_Selector_is_registered, 1); rb_define_method(cNIO_Selector, "select", NIO_Selector_select, -1); rb_define_method(cNIO_Selector, "wakeup", NIO_Selector_wakeup, 0); rb_define_method(cNIO_Selector, "close", NIO_Selector_close, 0); rb_define_method(cNIO_Selector, "closed?", NIO_Selector_closed, 0); rb_define_method(cNIO_Selector, "empty?", NIO_Selector_is_empty, 0); cNIO_Monitor = rb_define_class_under(mNIO, "Monitor", rb_cObject); } static const rb_data_type_t NIO_Selector_type = { "NIO::Selector", { NIO_Selector_mark, NIO_Selector_free, NIO_Selector_memsize, }, 0, 0, RUBY_TYPED_WB_PROTECTED // Don't free immediately because of shutdown }; /* Create the libev event loop and incoming event buffer */ static VALUE NIO_Selector_allocate(VALUE 
klass) { struct NIO_Selector *selector; int fds[2]; /* Use a pipe to implement the wakeup mechanism. I know libev provides async watchers that implement this same behavior, but I'm getting segvs trying to use that between threads, despite claims of thread safety. Pipes are nice and safe to use between threads. Note that Java NIO uses this same mechanism */ if (pipe(fds) < 0) { rb_sys_fail("pipe"); } /* Use non-blocking reads/writes during wakeup, in case the buffer is full */ if (fcntl(fds[0], F_SETFL, O_NONBLOCK) < 0 || fcntl(fds[1], F_SETFL, O_NONBLOCK) < 0) { rb_sys_fail("fcntl"); } VALUE obj = TypedData_Make_Struct(klass, struct NIO_Selector, &NIO_Selector_type, selector); /* Defer initializing the loop to #initialize */ selector->ev_loop = 0; ev_init(&selector->timer, NIO_Selector_timeout_callback); selector->wakeup_reader = fds[0]; selector->wakeup_writer = fds[1]; ev_io_init(&selector->wakeup, NIO_Selector_wakeup_callback, selector->wakeup_reader, EV_READ); selector->wakeup.data = (void *)selector; selector->closed = selector->selecting = selector->wakeup_fired = selector->ready_count = 0; RB_OBJ_WRITE(obj, &selector->ready_array, Qnil); return obj; } struct NIO_Selector *NIO_Selector_unwrap(VALUE self) { struct NIO_Selector *selector; TypedData_Get_Struct(self, struct NIO_Selector, &NIO_Selector_type, selector); return selector; } /* NIO selectors store all Ruby objects in instance variables so mark is a stub */ static void NIO_Selector_mark(void *data) { struct NIO_Selector *selector = (struct NIO_Selector *)data; if (selector->ready_array != Qnil) { rb_gc_mark(selector->ready_array); } } /* Free a Selector's system resources. Called by both NIO::Selector#close and the finalizer below */ static void NIO_Selector_shutdown(struct NIO_Selector *selector) { if (selector->closed) { return; } close(selector->wakeup_reader); close(selector->wakeup_writer); if (selector->ev_loop) { ev_loop_destroy(selector->ev_loop); selector->ev_loop = 0; } selector->closed = 1; } /* Ruby finalizer for selector objects */ static void NIO_Selector_free(void *data) { struct NIO_Selector *selector = (struct NIO_Selector *)data; NIO_Selector_shutdown(selector); xfree(selector); } static size_t NIO_Selector_memsize(const void *data) { return sizeof(struct NIO_Selector); } /* Return an array of symbols for supported backends */ static VALUE NIO_Selector_supported_backends(VALUE klass) { unsigned int backends = ev_supported_backends(); VALUE result = rb_ary_new(); if (backends & EVBACKEND_EPOLL) { rb_ary_push(result, ID2SYM(rb_intern("epoll"))); } if (backends & EVBACKEND_POLL) { rb_ary_push(result, ID2SYM(rb_intern("poll"))); } if (backends & EVBACKEND_KQUEUE) { rb_ary_push(result, ID2SYM(rb_intern("kqueue"))); } if (backends & EVBACKEND_SELECT) { rb_ary_push(result, ID2SYM(rb_intern("select"))); } if (backends & EVBACKEND_PORT) { rb_ary_push(result, ID2SYM(rb_intern("port"))); } if (backends & EVBACKEND_LINUXAIO) { rb_ary_push(result, ID2SYM(rb_intern("linuxaio"))); } if (backends & EVBACKEND_IOURING) { rb_ary_push(result, ID2SYM(rb_intern("io_uring"))); } return result; } /* Create a new selector. 
This is more or less the pure Ruby version translated into an MRI cext */ static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self) { ID backend_id; VALUE backend; VALUE lock; struct NIO_Selector *selector; unsigned int flags = 0; TypedData_Get_Struct(self, struct NIO_Selector, &NIO_Selector_type, selector); rb_scan_args(argc, argv, "01", &backend); if (backend != Qnil) { if (!rb_ary_includes(NIO_Selector_supported_backends(CLASS_OF(self)), backend)) { rb_raise(rb_eArgError, "unsupported backend: %s", RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0))); } backend_id = SYM2ID(backend); if (backend_id == rb_intern("epoll")) { flags = EVBACKEND_EPOLL; } else if (backend_id == rb_intern("poll")) { flags = EVBACKEND_POLL; } else if (backend_id == rb_intern("kqueue")) { flags = EVBACKEND_KQUEUE; } else if (backend_id == rb_intern("select")) { flags = EVBACKEND_SELECT; } else if (backend_id == rb_intern("port")) { flags = EVBACKEND_PORT; } else if (backend_id == rb_intern("linuxaio")) { flags = EVBACKEND_LINUXAIO; } else if (backend_id == rb_intern("io_uring")) { flags = EVBACKEND_IOURING; } else { rb_raise(rb_eArgError, "unsupported backend: %s", RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0))); } } /* Ensure the selector loop has not yet been initialized */ assert(!selector->ev_loop); selector->ev_loop = ev_loop_new(flags); if (!selector->ev_loop) { rb_raise(rb_eIOError, "error initializing event loop"); } ev_io_start(selector->ev_loop, &selector->wakeup); rb_ivar_set(self, rb_intern("selectables"), rb_hash_new()); rb_ivar_set(self, rb_intern("lock_holder"), Qnil); lock = rb_class_new_instance(0, 0, rb_const_get(rb_cObject, rb_intern("Mutex"))); rb_ivar_set(self, rb_intern("lock"), lock); rb_ivar_set(self, rb_intern("lock_holder"), Qnil); return Qnil; } static VALUE NIO_Selector_backend(VALUE self) { struct NIO_Selector *selector; TypedData_Get_Struct(self, struct NIO_Selector, &NIO_Selector_type, selector); if (selector->closed) { rb_raise(rb_eIOError, "selector is closed"); } switch (ev_backend(selector->ev_loop)) { case EVBACKEND_EPOLL: return ID2SYM(rb_intern("epoll")); case EVBACKEND_POLL: return ID2SYM(rb_intern("poll")); case EVBACKEND_KQUEUE: return ID2SYM(rb_intern("kqueue")); case EVBACKEND_SELECT: return ID2SYM(rb_intern("select")); case EVBACKEND_PORT: return ID2SYM(rb_intern("port")); case EVBACKEND_LINUXAIO: return ID2SYM(rb_intern("linuxaio")); case EVBACKEND_IOURING: return ID2SYM(rb_intern("io_uring")); } return ID2SYM(rb_intern("unknown")); } /* Synchronize around a reentrant selector lock */ static VALUE NIO_Selector_synchronize(VALUE self, VALUE (*func)(VALUE arg), VALUE arg) { VALUE current_thread, lock_holder, lock; current_thread = rb_thread_current(); lock_holder = rb_ivar_get(self, rb_intern("lock_holder")); if (lock_holder != current_thread) { lock = rb_ivar_get(self, rb_intern("lock")); rb_funcall(lock, rb_intern("lock"), 0); rb_ivar_set(self, rb_intern("lock_holder"), current_thread); /* We've acquired the lock, so ensure we unlock it */ return rb_ensure(func, (VALUE)arg, NIO_Selector_unlock, self); } else { /* We already hold the selector lock, so no need to unlock it */ return func(arg); } } /* Unlock the selector mutex */ static VALUE NIO_Selector_unlock(VALUE self) { VALUE lock; rb_ivar_set(self, rb_intern("lock_holder"), Qnil); lock = rb_ivar_get(self, rb_intern("lock")); rb_funcall(lock, rb_intern("unlock"), 0); return Qnil; } /* Register an IO object with the selector for the given interests */ static VALUE 
NIO_Selector_register(VALUE self, VALUE io, VALUE interests) { VALUE args[3] = {self, io, interests}; return NIO_Selector_synchronize(self, NIO_Selector_register_synchronized, (VALUE)args); } /* Internal implementation of register after acquiring mutex */ static VALUE NIO_Selector_register_synchronized(VALUE _args) { VALUE self, io, interests, selectables, monitor; VALUE monitor_args[3]; struct NIO_Selector *selector; VALUE *args = (VALUE *)_args; self = args[0]; io = args[1]; interests = args[2]; TypedData_Get_Struct(self, struct NIO_Selector, &NIO_Selector_type, selector); if (selector->closed) { rb_raise(rb_eIOError, "selector is closed"); } selectables = rb_ivar_get(self, rb_intern("selectables")); monitor = rb_hash_lookup(selectables, io); if (monitor != Qnil) rb_raise(rb_eArgError, "this IO is already registered with selector"); /* Create a new NIO::Monitor */ monitor_args[0] = io; monitor_args[1] = interests; monitor_args[2] = self; monitor = rb_class_new_instance(3, monitor_args, cNIO_Monitor); rb_hash_aset(selectables, rb_funcall(monitor, rb_intern("io"), 0), monitor); return monitor; } /* Deregister an IO object from the selector */ static VALUE NIO_Selector_deregister(VALUE self, VALUE io) { VALUE args[2] = {self, io}; return NIO_Selector_synchronize(self, NIO_Selector_deregister_synchronized, (VALUE)args); } /* Internal implementation of register after acquiring mutex */ static VALUE NIO_Selector_deregister_synchronized(VALUE _args) { VALUE self, io, selectables, monitor; VALUE *args = (VALUE *)_args; self = args[0]; io = args[1]; selectables = rb_ivar_get(self, rb_intern("selectables")); monitor = rb_hash_delete(selectables, io); if (monitor != Qnil) { rb_funcall(monitor, rb_intern("close"), 1, Qfalse); } return monitor; } /* Is the given IO object registered with the selector */ static VALUE NIO_Selector_is_registered(VALUE self, VALUE io) { VALUE selectables = rb_ivar_get(self, rb_intern("selectables")); /* Perhaps this should be holding the mutex? 
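(Even with the lock held the answer could be stale by the time the caller acts on it, so the unlocked hash read can at worst return momentarily outdated information.)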
*/ return rb_funcall(selectables, rb_intern("has_key?"), 1, io); } /* Select from all registered IO objects */ static VALUE NIO_Selector_select(int argc, VALUE *argv, VALUE self) { VALUE timeout; rb_scan_args(argc, argv, "01", &timeout); if (timeout != Qnil && NUM2DBL(timeout) < 0) { rb_raise(rb_eArgError, "time interval must be positive"); } VALUE args[2] = {self, timeout}; return NIO_Selector_synchronize(self, NIO_Selector_select_synchronized, (VALUE)args); } /* Internal implementation of select with the selector lock held */ static VALUE NIO_Selector_select_synchronized(VALUE _args) { int ready; VALUE ready_array; struct NIO_Selector *selector; VALUE *args = (VALUE *)_args; TypedData_Get_Struct(args[0], struct NIO_Selector, &NIO_Selector_type, selector); if (selector->closed) { rb_raise(rb_eIOError, "selector is closed"); } if (!rb_block_given_p()) { RB_OBJ_WRITE(args[0], &selector->ready_array, rb_ary_new()); } ready = NIO_Selector_run(selector, args[1]); /* Timeout */ if (ready < 0) { if (!rb_block_given_p()) { RB_OBJ_WRITE(args[0], &selector->ready_array, Qnil); } return Qnil; } if (rb_block_given_p()) { return INT2NUM(ready); } else { ready_array = selector->ready_array; RB_OBJ_WRITE(args[0], &selector->ready_array, Qnil); return ready_array; } } static int NIO_Selector_run(struct NIO_Selector *selector, VALUE timeout) { int ev_run_flags = EVRUN_ONCE; int result; double timeout_val; selector->selecting = 1; selector->wakeup_fired = 0; if (timeout == Qnil) { /* Don't fire a wakeup timeout if we weren't passed one */ ev_timer_stop(selector->ev_loop, &selector->timer); } else { timeout_val = NUM2DBL(timeout); if (timeout_val == 0) { /* If we've been given an explicit timeout of 0, perform a non-blocking select operation */ ev_run_flags = EVRUN_NOWAIT; } else { selector->timer.repeat = timeout_val; ev_timer_again(selector->ev_loop, &selector->timer); } } /* libev is patched to release the GIL when it makes its system call */ ev_run(selector->ev_loop, ev_run_flags); result = selector->ready_count; selector->selecting = selector->ready_count = 0; if (result > 0 || selector->wakeup_fired) { selector->wakeup_fired = 0; return result; } else { return -1; } } /* Wake the selector up from another thread */ static VALUE NIO_Selector_wakeup(VALUE self) { struct NIO_Selector *selector; TypedData_Get_Struct(self, struct NIO_Selector, &NIO_Selector_type, selector); if (selector->closed) { rb_raise(rb_eIOError, "selector is closed"); } selector->wakeup_fired = 1; write(selector->wakeup_writer, "\0", 1); return Qnil; } /* Close the selector and free system resources */ static VALUE NIO_Selector_close(VALUE self) { return NIO_Selector_synchronize(self, NIO_Selector_close_synchronized, self); } static VALUE NIO_Selector_close_synchronized(VALUE self) { struct NIO_Selector *selector; TypedData_Get_Struct(self, struct NIO_Selector, &NIO_Selector_type, selector); NIO_Selector_shutdown(selector); return Qnil; } /* Is the selector closed? */ static VALUE NIO_Selector_closed(VALUE self) { return NIO_Selector_synchronize(self, NIO_Selector_closed_synchronized, self); } static VALUE NIO_Selector_closed_synchronized(VALUE self) { struct NIO_Selector *selector; TypedData_Get_Struct(self, struct NIO_Selector, &NIO_Selector_type, selector); return selector->closed ? Qtrue : Qfalse; } /* True if there are monitors on the loop */ static VALUE NIO_Selector_is_empty(VALUE self) { VALUE selectables = rb_ivar_get(self, rb_intern("selectables")); return rb_funcall(selectables, rb_intern("empty?"), 0) == Qtrue ? 
Qtrue : Qfalse; } /* Called whenever a timeout fires on the event loop */ static void NIO_Selector_timeout_callback(struct ev_loop *ev_loop, struct ev_timer *timer, int revents) { } /* Called whenever a wakeup request is sent to a selector */ static void NIO_Selector_wakeup_callback(struct ev_loop *ev_loop, struct ev_io *io, int revents) { char buffer[128]; struct NIO_Selector *selector = (struct NIO_Selector *)io->data; selector->selecting = 0; /* Drain the wakeup pipe, giving us level-triggered behavior */ while (read(selector->wakeup_reader, buffer, 128) > 0) ; } /* libev callback fired whenever a monitor gets an event */ void NIO_Selector_monitor_callback(struct ev_loop *ev_loop, struct ev_io *io, int revents) { struct NIO_Monitor *monitor_data = (struct NIO_Monitor *)io->data; struct NIO_Selector *selector = monitor_data->selector; VALUE monitor = monitor_data->self; assert(monitor_data->interests != 0); assert(selector != 0); selector->ready_count++; monitor_data->revents = revents; if (rb_block_given_p()) { rb_yield(monitor); } else { assert(selector->ready_array != Qnil); rb_ary_push(selector->ready_array, monitor); } } nio4r-2.7.3/ext/nio4r/nio4r_ext.c0000644000004100000410000000060014632135320016513 0ustar www-datawww-data/* * Copyright (c) 2011-2017 Tony Arcieri. Distributed under the MIT License. * See LICENSE.txt for further details. */ #include "../libev/ev.c" #include "nio4r.h" void Init_NIO_Selector(); void Init_NIO_Monitor(); void Init_NIO_ByteBuffer(); void Init_nio4r_ext() { ev_set_allocator(xrealloc); Init_NIO_Selector(); Init_NIO_Monitor(); Init_NIO_ByteBuffer(); } nio4r-2.7.3/ext/nio4r/libev.h0000644000004100000410000000020514632135320015707 0ustar www-datawww-data#ifdef _WIN32 #define EV_SELECT_IS_WINSOCKET 1 #define EV_USE_MONOTONIC 0 #define EV_USE_REALTIME 0 #endif #include "../libev/ev.h" nio4r-2.7.3/ext/nio4r/org/0000755000004100000410000000000014632135320015227 5ustar www-datawww-datanio4r-2.7.3/ext/nio4r/org/nio4r/0000755000004100000410000000000014632135320016262 5ustar www-datawww-datanio4r-2.7.3/ext/nio4r/org/nio4r/ByteBuffer.java0000644000004100000410000002267714632135320021200 0ustar www-datawww-datapackage org.nio4r; import java.io.IOException; import java.io.Serializable; import java.nio.channels.Channel; import java.nio.channels.SelectableChannel; import java.nio.channels.ReadableByteChannel; import java.nio.channels.WritableByteChannel; import java.nio.BufferOverflowException; import java.nio.BufferUnderflowException; import java.nio.InvalidMarkException; import org.jruby.Ruby; import org.jruby.RubyClass; import org.jruby.RubyIO; import org.jruby.RubyNumeric; import org.jruby.RubyObject; import org.jruby.RubyString; import org.jruby.anno.JRubyMethod; import org.jruby.exceptions.RaiseException; import org.jruby.runtime.ThreadContext; import org.jruby.runtime.builtin.IRubyObject; import org.jruby.runtime.Block; /* created by Upekshej */ public class ByteBuffer extends RubyObject { private static final long serialVersionUID = -6903439483039149324L; private transient java.nio.ByteBuffer byteBuffer; public static RaiseException newOverflowError(ThreadContext context, String message) { RubyClass klass = context.runtime.getModule("NIO").getClass("ByteBuffer").getClass("OverflowError"); return context.runtime.newRaiseException(klass, message); } public static RaiseException newUnderflowError(ThreadContext context, String message) { RubyClass klass = context.runtime.getModule("NIO").getClass("ByteBuffer").getClass("UnderflowError"); return 
context.runtime.newRaiseException(klass, message); } public static RaiseException newMarkUnsetError(ThreadContext context, String message) { RubyClass klass = context.runtime.getModule("NIO").getClass("ByteBuffer").getClass("MarkUnsetError"); return context.runtime.newRaiseException(klass, message); } public ByteBuffer(final Ruby ruby, RubyClass rubyClass) { super(ruby, rubyClass); } @JRubyMethod public IRubyObject initialize(ThreadContext context, IRubyObject capacity) { this.byteBuffer = java.nio.ByteBuffer.allocate(RubyNumeric.num2int(capacity)); return this; } @JRubyMethod public IRubyObject clear(ThreadContext context) { this.byteBuffer.clear(); return this; } @JRubyMethod(name = "position") public IRubyObject getPosition(ThreadContext context) { return context.getRuntime().newFixnum(this.byteBuffer.position()); } @JRubyMethod(name = "position=") public IRubyObject setPosition(ThreadContext context, IRubyObject newPosition) { int pos = RubyNumeric.num2int(newPosition); if(pos < 0) { throw context.runtime.newArgumentError("negative position given"); } if(pos > this.byteBuffer.limit()) { throw context.runtime.newArgumentError("specified position exceeds limit"); } try { this.byteBuffer.position(pos); return newPosition; } catch(IllegalArgumentException e) { throw context.runtime.newArgumentError(e.getLocalizedMessage()); } } @JRubyMethod(name = "limit") public IRubyObject getLimit(ThreadContext context) { return context.getRuntime().newFixnum(this.byteBuffer.limit()); } @JRubyMethod(name = "limit=") public IRubyObject setLimit(ThreadContext context, IRubyObject newLimit) { int lim = RubyNumeric.num2int(newLimit); if(lim < 0) { throw context.runtime.newArgumentError("negative limit given"); } if(lim > this.byteBuffer.capacity()) { throw context.runtime.newArgumentError("specified limit exceeds capacity"); } try { this.byteBuffer.limit(lim); return newLimit; } catch(IllegalArgumentException e) { throw context.runtime.newArgumentError(e.getLocalizedMessage()); } } @JRubyMethod(name = {"capacity", "size"}) public IRubyObject capacity(ThreadContext context) { return context.getRuntime().newFixnum(this.byteBuffer.capacity()); } @JRubyMethod public IRubyObject remaining(ThreadContext context) { return context.getRuntime().newFixnum(this.byteBuffer.remaining()); } @JRubyMethod(name = "full?") public IRubyObject isFull(ThreadContext context) { if (this.byteBuffer.hasRemaining()) { return context.getRuntime().getFalse(); } else { return context.getRuntime().getTrue(); } } @JRubyMethod public IRubyObject get(ThreadContext context) { return this.get(context, context.getRuntime().newFixnum(this.byteBuffer.remaining())); } @JRubyMethod public IRubyObject get(ThreadContext context, IRubyObject length) { int len = RubyNumeric.num2int(length); byte[] bytes = new byte[len]; try { this.byteBuffer.get(bytes); } catch(BufferUnderflowException e) { throw ByteBuffer.newUnderflowError(context, "not enough data in buffer"); } return RubyString.newString(context.getRuntime(), bytes); } @JRubyMethod(name = "[]") public IRubyObject fetch(ThreadContext context, IRubyObject index) { int i = RubyNumeric.num2int(index); if(i < 0) { throw context.runtime.newArgumentError("negative index given"); } if(i >= this.byteBuffer.limit()) { throw context.runtime.newArgumentError("index exceeds limit"); } return context.getRuntime().newFixnum(this.byteBuffer.get(i)); } @JRubyMethod(name = "<<") public IRubyObject put(ThreadContext context, IRubyObject str) { try { this.byteBuffer.put(str.convertToString().getByteList().bytes()); 
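// put throws BufferOverflowException when the string is larger than the
// remaining capacity; it is mapped to NIO::ByteBuffer::OverflowError below.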
} catch(BufferOverflowException e) { throw ByteBuffer.newOverflowError(context, "buffer is full"); } return this; } @JRubyMethod(name = "read_from") public IRubyObject readFrom(ThreadContext context, IRubyObject io) { Ruby runtime = context.runtime; Channel channel = RubyIO.convertToIO(context, io).getChannel(); if(!this.byteBuffer.hasRemaining()) { throw ByteBuffer.newOverflowError(context, "buffer is full"); } if(!(channel instanceof ReadableByteChannel) || !(channel instanceof SelectableChannel)) { throw runtime.newArgumentError("unsupported IO object: " + io.getType().toString()); } try { ((SelectableChannel)channel).configureBlocking(false); } catch(IOException ie) { throw runtime.newIOError(ie.getLocalizedMessage()); } try { int bytesRead = ((ReadableByteChannel)channel).read(this.byteBuffer); if(bytesRead >= 0) { return runtime.newFixnum(bytesRead); } else { throw runtime.newEOFError(); } } catch(IOException ie) { throw runtime.newIOError(ie.getLocalizedMessage()); } } @JRubyMethod(name = "write_to") public IRubyObject writeTo(ThreadContext context, IRubyObject io) { Ruby runtime = context.runtime; Channel channel = RubyIO.convertToIO(context, io).getChannel(); if(!this.byteBuffer.hasRemaining()) { throw ByteBuffer.newUnderflowError(context, "not enough data in buffer"); } if(!(channel instanceof WritableByteChannel) || !(channel instanceof SelectableChannel)) { throw runtime.newArgumentError("unsupported IO object: " + io.getType().toString()); } try { ((SelectableChannel)channel).configureBlocking(false); } catch(IOException ie) { throw runtime.newIOError(ie.getLocalizedMessage()); } try { int bytesWritten = ((WritableByteChannel)channel).write(this.byteBuffer); if(bytesWritten >= 0) { return runtime.newFixnum(bytesWritten); } else { throw runtime.newEOFError(); } } catch(IOException ie) { throw runtime.newIOError(ie.getLocalizedMessage()); } } @JRubyMethod public IRubyObject flip(ThreadContext context) { this.byteBuffer.flip(); return this; } @JRubyMethod public IRubyObject rewind(ThreadContext context) { this.byteBuffer.rewind(); return this; } @JRubyMethod public IRubyObject mark(ThreadContext context) { this.byteBuffer.mark(); return this; } @JRubyMethod public IRubyObject reset(ThreadContext context) { try { this.byteBuffer.reset(); return this; } catch(InvalidMarkException ie) { throw ByteBuffer.newMarkUnsetError(context, "mark has not been set"); } } @JRubyMethod public IRubyObject compact(ThreadContext context) { this.byteBuffer.compact(); return this; } @JRubyMethod public IRubyObject each(ThreadContext context, Block block) { for(int i = 0; i < this.byteBuffer.limit(); i++) { block.call(context, context.getRuntime().newFixnum(this.byteBuffer.get(i))); } return this; } @JRubyMethod public IRubyObject inspect(ThreadContext context) { return context.runtime.newString(String.format( "#<%s:0x%x @position=%d @limit=%d @capacity=%d>", this.getType().toString(), System.identityHashCode(this), this.byteBuffer.position(), this.byteBuffer.limit(), this.byteBuffer.capacity() )); } } nio4r-2.7.3/ext/nio4r/org/nio4r/Monitor.java0000644000004100000410000001265514632135320020565 0ustar www-datawww-datapackage org.nio4r; import java.nio.channels.Channel; import java.nio.channels.SelectableChannel; import java.nio.channels.SelectionKey; import org.jruby.Ruby; import org.jruby.RubyClass; import org.jruby.RubyIO; import org.jruby.RubyObject; import org.jruby.anno.JRubyMethod; import org.jruby.runtime.ThreadContext; import org.jruby.runtime.builtin.IRubyObject; public class Monitor extends 
RubyObject { private static final long serialVersionUID = -3733782997115074794L; private transient SelectionKey key; private RubyIO io; private transient IRubyObject interests, selector, value, closed; public Monitor(final Ruby ruby, RubyClass rubyClass) { super(ruby, rubyClass); } @JRubyMethod public IRubyObject initialize(ThreadContext context, IRubyObject selectable, IRubyObject interests, IRubyObject selector) { this.io = RubyIO.convertToIO(context, selectable); this.interests = interests; this.selector = selector; this.value = context.nil; this.closed = context.getRuntime().getFalse(); return context.nil; } public void setSelectionKey(SelectionKey key) { this.key = key; key.attach(this); } @JRubyMethod public IRubyObject io(ThreadContext context) { return io; } @JRubyMethod public IRubyObject selector(ThreadContext context) { return selector; } @JRubyMethod(name = "interests") public IRubyObject getInterests(ThreadContext context) { return interests; } @JRubyMethod(name = "interests=") public IRubyObject setInterests(ThreadContext context, IRubyObject interests) { if(this.closed == context.getRuntime().getTrue()) { throw context.getRuntime().newEOFError("monitor is closed"); } Ruby ruby = context.getRuntime(); SelectableChannel channel = (SelectableChannel)io.getChannel(); if(interests != context.nil) { key.interestOps(Nio4r.symbolToInterestOps(ruby, channel, interests)); } else { key.interestOps(0); } this.interests = interests; return this.interests; } @JRubyMethod(name = "add_interest") public IRubyObject addInterest(ThreadContext context, IRubyObject interest) { if(this.closed == context.getRuntime().getTrue()) { throw context.getRuntime().newEOFError("monitor is closed"); } Ruby ruby = context.getRuntime(); SelectableChannel channel = (SelectableChannel)io.getChannel(); int newInterestOps = key.interestOps() | Nio4r.symbolToInterestOps(ruby, channel, interest); key.interestOps(newInterestOps); this.interests = Nio4r.interestOpsToSymbol(ruby, newInterestOps); return this.interests; } @JRubyMethod(name = "remove_interest") public IRubyObject removeInterest(ThreadContext context, IRubyObject interest) { if(this.closed == context.getRuntime().getTrue()) { throw context.getRuntime().newEOFError("monitor is closed"); } Ruby ruby = context.getRuntime(); SelectableChannel channel = (SelectableChannel)io.getChannel(); int newInterestOps = key.interestOps() & ~Nio4r.symbolToInterestOps(ruby, channel, interest); key.interestOps(newInterestOps); this.interests = Nio4r.interestOpsToSymbol(ruby, newInterestOps); return this.interests; } @JRubyMethod public IRubyObject readiness(ThreadContext context) { if(!key.isValid()) return this.interests; return Nio4r.interestOpsToSymbol(context.getRuntime(), key.readyOps()); } @JRubyMethod(name = "readable?") public IRubyObject isReadable(ThreadContext context) { Ruby runtime = context.getRuntime(); if (!this.key.isValid()) return runtime.getTrue(); int readyOps = this.key.readyOps(); if((readyOps & SelectionKey.OP_READ) != 0 || (readyOps & SelectionKey.OP_ACCEPT) != 0) { return runtime.getTrue(); } else { return runtime.getFalse(); } } @JRubyMethod(name = {"writable?", "writeable?"}) public IRubyObject writable(ThreadContext context) { Ruby runtime = context.getRuntime(); if (!this.key.isValid()) return runtime.getTrue(); int readyOps = this.key.readyOps(); if((readyOps & SelectionKey.OP_WRITE) != 0 || (readyOps & SelectionKey.OP_CONNECT) != 0) { return runtime.getTrue(); } else { return runtime.getFalse(); } } @JRubyMethod(name = "value") public 
IRubyObject getValue(ThreadContext context) { return this.value; } @JRubyMethod(name = "value=") public IRubyObject setValue(ThreadContext context, IRubyObject obj) { this.value = obj; return context.nil; } @JRubyMethod public IRubyObject close(ThreadContext context) { return close(context, context.getRuntime().getTrue()); } @JRubyMethod public IRubyObject close(ThreadContext context, IRubyObject deregister) { Ruby runtime = context.getRuntime(); this.closed = runtime.getTrue(); if(deregister == runtime.getTrue()) { selector.callMethod(context, "deregister", io); } return context.nil; } @JRubyMethod(name = "closed?") public IRubyObject isClosed(ThreadContext context) { return this.closed; } } nio4r-2.7.3/ext/nio4r/org/nio4r/Selector.java0000644000004100000410000002315114632135320020707 0ustar www-datawww-datapackage org.nio4r; import java.util.Iterator; import java.util.Map; import java.util.HashMap; import java.io.IOException; import java.nio.channels.Channel; import java.nio.channels.SelectableChannel; import java.nio.channels.SelectionKey; import org.jruby.Ruby; import org.jruby.RubyArray; import org.jruby.RubyClass; import org.jruby.RubyIO; import org.jruby.RubyNumeric; import org.jruby.RubyObject; import org.jruby.anno.JRubyMethod; import org.jruby.runtime.Block; import org.jruby.runtime.ThreadContext; import org.jruby.runtime.builtin.IRubyObject; import org.jruby.util.io.OpenFile; public class Selector extends RubyObject { private static final long serialVersionUID = -14562818539414873L; private transient java.nio.channels.Selector selector; private HashMap<SelectableChannel,SelectionKey> cancelledKeys; private volatile boolean wakeupFired; public Selector(final Ruby ruby, RubyClass rubyClass) { super(ruby, rubyClass); } @JRubyMethod(meta = true) public static IRubyObject backends(ThreadContext context, IRubyObject self) { return context.runtime.newArray(context.runtime.newSymbol("java")); } @JRubyMethod public IRubyObject initialize(ThreadContext context) { initialize(context, context.runtime.newSymbol("java")); return context.nil; } @JRubyMethod public IRubyObject initialize(ThreadContext context, IRubyObject backend) { if(backend != context.runtime.newSymbol("java") && !backend.isNil()) { throw context.runtime.newArgumentError(":java is the only supported backend"); } this.cancelledKeys = new HashMap<SelectableChannel,SelectionKey>(); this.wakeupFired = false; try { this.selector = java.nio.channels.Selector.open(); } catch(IOException ie) { throw context.runtime.newIOError(ie.getLocalizedMessage()); } return context.nil; } @JRubyMethod public IRubyObject backend(ThreadContext context) { return context.runtime.newSymbol("java"); } @JRubyMethod public IRubyObject close(ThreadContext context) { try { this.selector.close(); } catch(IOException ie) { throw context.runtime.newIOError(ie.getLocalizedMessage()); } return context.nil; } @JRubyMethod(name = "closed?") public IRubyObject isClosed(ThreadContext context) { Ruby runtime = context.getRuntime(); return this.selector.isOpen() ? runtime.getFalse() : runtime.getTrue(); } @JRubyMethod(name = "empty?") public IRubyObject isEmpty(ThreadContext context) { Ruby runtime = context.getRuntime(); return this.selector.keys().isEmpty() ?
runtime.getTrue() : runtime.getFalse(); } @JRubyMethod public IRubyObject register(ThreadContext context, IRubyObject io, IRubyObject interests) { Ruby runtime = context.getRuntime(); Channel rawChannel = RubyIO.convertToIO(context, io).getChannel(); if(!this.selector.isOpen()) { throw context.getRuntime().newIOError("selector is closed"); } if(!(rawChannel instanceof SelectableChannel)) { throw runtime.newArgumentError("not a selectable IO object"); } SelectableChannel channel = (SelectableChannel)rawChannel; try { channel.configureBlocking(false); } catch(IOException ie) { throw runtime.newIOError(ie.getLocalizedMessage()); } int interestOps = Nio4r.symbolToInterestOps(runtime, channel, interests); SelectionKey key; key = this.cancelledKeys.remove(channel); if(key != null) { key.interestOps(interestOps); } else { try { key = channel.register(this.selector, interestOps); } catch(java.lang.IllegalArgumentException ia) { throw runtime.newArgumentError("mode not supported for this object: " + interests); } catch(java.nio.channels.ClosedChannelException cce) { throw context.runtime.newIOError(cce.getLocalizedMessage()); } } RubyClass monitorClass = runtime.getModule("NIO").getClass("Monitor"); Monitor monitor = (Monitor)monitorClass.newInstance(context, io, interests, this, null); monitor.setSelectionKey(key); return monitor; } @JRubyMethod public IRubyObject deregister(ThreadContext context, IRubyObject io) { Ruby runtime = context.getRuntime(); OpenFile file = RubyIO.convertToIO(context, io).getOpenFileInitialized(); if (file.fd() == null) return context.nil; Channel rawChannel = file.channel(); if(!(rawChannel instanceof SelectableChannel)) { throw runtime.newArgumentError("not a selectable IO object"); } SelectableChannel channel = (SelectableChannel)rawChannel; SelectionKey key = channel.keyFor(this.selector); if(key == null) return context.nil; Monitor monitor = (Monitor)key.attachment(); monitor.close(context, runtime.getFalse()); cancelledKeys.put(channel, key); return monitor; } @JRubyMethod(name = "registered?") public IRubyObject isRegistered(ThreadContext context, IRubyObject io) { Ruby runtime = context.getRuntime(); Channel rawChannel = RubyIO.convertToIO(context, io).getChannel(); if(!(rawChannel instanceof SelectableChannel)) { throw runtime.newArgumentError("not a selectable IO object"); } SelectableChannel channel = (SelectableChannel)rawChannel; SelectionKey key = channel.keyFor(this.selector); if(key == null) return context.nil; if(((Monitor)key.attachment()).isClosed(context) == runtime.getTrue()) { return runtime.getFalse(); } else { return runtime.getTrue(); } } @JRubyMethod public synchronized IRubyObject select(ThreadContext context, Block block) { return select(context, context.nil, block); } @JRubyMethod public synchronized IRubyObject select(ThreadContext context, IRubyObject timeout, Block block) { Ruby runtime = context.getRuntime(); if(!this.selector.isOpen()) { throw context.getRuntime().newIOError("selector is closed"); } this.wakeupFired = false; int ready = doSelect(runtime, context, timeout); /* Timeout */ if(ready <= 0 && !this.wakeupFired) { return context.nil; } RubyArray array = null; if(!block.isGiven()) { array = runtime.newArray(this.selector.selectedKeys().size()); } Iterator<SelectionKey> selectedKeys = this.selector.selectedKeys().iterator(); while(selectedKeys.hasNext()) { SelectionKey key = selectedKeys.next(); processKey(key); selectedKeys.remove(); if(block.isGiven()) { block.call(context, (IRubyObject)key.attachment()); } else {
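// No block given: accumulate the ready monitors so select can return them as an array.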
array.add(key.attachment()); } } if(block.isGiven()) { return RubyNumeric.int2fix(runtime, ready); } else { return array; } } /* Run the selector */ private int doSelect(Ruby runtime, ThreadContext context, IRubyObject timeout) { int result; cancelKeys(); try { context.getThread().beforeBlockingCall(context); if(timeout.isNil()) { result = this.selector.select(); } else { double t = RubyNumeric.num2dbl(timeout); if(t == 0) { result = this.selector.selectNow(); } else if(t < 0) { throw runtime.newArgumentError("time interval must be positive"); } else { long timeoutMilliSeconds = (long)(t * 1000); if(timeoutMilliSeconds == 0) { result = this.selector.selectNow(); } else { result = this.selector.select(timeoutMilliSeconds); } } } context.getThread().afterBlockingCall(); return result; } catch(IOException ie) { throw runtime.newIOError(ie.getLocalizedMessage()); } } /* Flush our internal buffer of cancelled keys */ private void cancelKeys() { Iterator<Map.Entry<SelectableChannel,SelectionKey>> cancelledKeys = this.cancelledKeys.entrySet().iterator(); while(cancelledKeys.hasNext()) { Map.Entry<SelectableChannel,SelectionKey> entry = cancelledKeys.next(); SelectionKey key = entry.getValue(); key.cancel(); cancelledKeys.remove(); } } // Remove connect interest from connected sockets // See: http://stackoverflow.com/questions/204186/java-nio-select-returns-without-selected-keys-why private void processKey(SelectionKey key) { if(key.isValid() && (key.readyOps() & SelectionKey.OP_CONNECT) != 0) { int interestOps = key.interestOps(); interestOps &= ~SelectionKey.OP_CONNECT; interestOps |= SelectionKey.OP_WRITE; key.interestOps(interestOps); } } @JRubyMethod public IRubyObject wakeup(ThreadContext context) { if(!this.selector.isOpen()) { throw context.getRuntime().newIOError("selector is closed"); } this.wakeupFired = true; this.selector.wakeup(); return context.nil; } } nio4r-2.7.3/ext/nio4r/org/nio4r/Nio4r.java0000644000004100000410000000765614632135320020132 0ustar www-datawww-datapackage org.nio4r; import java.nio.channels.SelectableChannel; import java.nio.channels.SelectionKey; import java.nio.channels.SocketChannel; import org.jruby.Ruby; import org.jruby.RubyClass; import org.jruby.RubyModule; import org.jruby.runtime.ObjectAllocator; import org.jruby.runtime.load.Library; import org.jruby.runtime.builtin.IRubyObject; import org.nio4r.ByteBuffer; import org.nio4r.Monitor; import org.nio4r.Selector; public class Nio4r implements Library { private Ruby ruby; public void load(final Ruby ruby, boolean bln) { this.ruby = ruby; RubyModule nio = ruby.defineModule("NIO"); RubyClass selector = ruby.defineClassUnder("Selector", ruby.getObject(), new ObjectAllocator() { public IRubyObject allocate(Ruby ruby, RubyClass rc) { return new Selector(ruby, rc); } }, nio); selector.defineAnnotatedMethods(Selector.class); RubyClass monitor = ruby.defineClassUnder("Monitor", ruby.getObject(), new ObjectAllocator() { public IRubyObject allocate(Ruby ruby, RubyClass rc) { return new Monitor(ruby, rc); } }, nio); monitor.defineAnnotatedMethods(Monitor.class); RubyClass byteBuffer = ruby.defineClassUnder("ByteBuffer", ruby.getObject(), new ObjectAllocator() { public IRubyObject allocate(Ruby ruby, RubyClass rc) { return new ByteBuffer(ruby, rc); } }, nio); byteBuffer.defineAnnotatedMethods(ByteBuffer.class); byteBuffer.includeModule(ruby.getEnumerable()); ruby.defineClassUnder("OverflowError", ruby.getIOError(), ruby.getIOError().getAllocator(), byteBuffer); ruby.defineClassUnder("UnderflowError", ruby.getIOError(), ruby.getIOError().getAllocator(), byteBuffer);
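// MarkUnsetError is raised by ByteBuffer#reset when no mark has been set.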
ruby.defineClassUnder("MarkUnsetError", ruby.getIOError(), ruby.getIOError().getAllocator(), byteBuffer); } public static int symbolToInterestOps(Ruby ruby, SelectableChannel channel, IRubyObject interest) { if(interest == ruby.newSymbol("r")) { if((channel.validOps() & SelectionKey.OP_ACCEPT) != 0) { return SelectionKey.OP_ACCEPT; } else { return SelectionKey.OP_READ; } } else if(interest == ruby.newSymbol("w")) { if(channel instanceof SocketChannel && !((SocketChannel)channel).isConnected()) { return SelectionKey.OP_CONNECT; } else { return SelectionKey.OP_WRITE; } } else if(interest == ruby.newSymbol("rw")) { int interestOps = 0; /* nio4r emulates the POSIX behavior, which is sloppy about allowed modes */ if((channel.validOps() & (SelectionKey.OP_READ | SelectionKey.OP_ACCEPT)) != 0) { interestOps |= symbolToInterestOps(ruby, channel, ruby.newSymbol("r")); } if((channel.validOps() & (SelectionKey.OP_WRITE | SelectionKey.OP_CONNECT)) != 0) { interestOps |= symbolToInterestOps(ruby, channel, ruby.newSymbol("w")); } return interestOps; } else { throw ruby.newArgumentError("invalid interest type: " + interest); } } public static IRubyObject interestOpsToSymbol(Ruby ruby, int interestOps) { switch(interestOps) { case SelectionKey.OP_READ: case SelectionKey.OP_ACCEPT: return ruby.newSymbol("r"); case SelectionKey.OP_WRITE: case SelectionKey.OP_CONNECT: return ruby.newSymbol("w"); case SelectionKey.OP_READ | SelectionKey.OP_CONNECT: case SelectionKey.OP_READ | SelectionKey.OP_WRITE: return ruby.newSymbol("rw"); case 0: return ruby.getNil(); default: throw ruby.newArgumentError("unknown interest op combination"); } } } nio4r-2.7.3/ext/nio4r/extconf.rb0000644000004100000410000000332314632135320016434 0ustar www-datawww-data# frozen_string_literal: true # Released under the MIT License. # Copyright, 2011-2020, by Tony Arcieri. # Copyright, 2014, by Hiroshi Shibata. # Copyright, 2014, by Sergey Avseyev. # Copyright, 2015, by Daniel Berger. # Copyright, 2017, by Jun Aruga. # Copyright, 2017, by Usaku Nakamura. # Copyright, 2017, by Lars Kanis. # Copyright, 2019-2023, by Samuel Williams. # Copyright, 2020, by Gregory Longtin. # Copyright, 2020, by Boaz Segev. # Copyright, 2020, by Joao Fernandes. # Copyright, 2021, by Jeffrey Martin. require "rubygems" # Write a dummy Makefile on Windows because we use the pure Ruby implementation there if Gem.win_platform? 
begin require "devkit" if RUBY_PLATFORM.include?("mingw") rescue LoadError => e end File.write("Makefile", "all install::\n") File.write("nio4r_ext.so", "") exit end require "mkmf" have_header("unistd.h") have_func("rb_io_descriptor") $defs << "-DEV_USE_LINUXAIO" if have_header("linux/aio_abi.h") $defs << "-DEV_USE_IOURING" if have_header("linux/io_uring.h") $defs << "-DEV_USE_SELECT" if have_header("sys/select.h") $defs << "-DEV_USE_POLL" if have_type("port_event_t", "poll.h") $defs << "-DEV_USE_EPOLL" if have_header("sys/epoll.h") $defs << "-DEV_USE_KQUEUE" if have_header("sys/event.h") && have_header("sys/queue.h") $defs << "-DEV_USE_PORT" if have_type("port_event_t", "port.h") $defs << "-DHAVE_SYS_RESOURCE_H" if have_header("sys/resource.h") $defs << "-DEV_STANDALONE" # prevent libev from assuming "config.h" exists CONFIG["optflags"] << " -fno-strict-aliasing" unless RUBY_PLATFORM =~ /mswin/ if RUBY_PLATFORM =~ /darwin/ $DLDFLAGS.gsub!(/\-arch\s+[^\s]+/, "") end dir_config "nio4r_ext" create_makefile "nio4r_ext" nio4r-2.7.3/ext/nio4r/nio4r.h0000644000004100000410000000200114632135320015635 0ustar www-datawww-data/* * Copyright (c) 2011 Tony Arcieri. Distributed under the MIT License. See * LICENSE.txt for further details. */ #ifndef NIO4R_H #define NIO4R_H #include "libev.h" #include "ruby.h" #include "ruby/io.h" struct NIO_Selector { struct ev_loop *ev_loop; struct ev_timer timer; /* for timeouts */ struct ev_io wakeup; int ready_count; int closed, selecting; int wakeup_reader, wakeup_writer; volatile int wakeup_fired; VALUE ready_array; }; struct NIO_callback_data { VALUE *monitor; struct NIO_Selector *selector; }; struct NIO_Monitor { VALUE self; int interests, revents; struct ev_io ev_io; struct NIO_Selector *selector; }; struct NIO_ByteBuffer { char *buffer; int position, limit, capacity, mark; }; struct NIO_Selector *NIO_Selector_unwrap(VALUE selector); /* Thunk between libev callbacks in NIO::Monitors and NIO::Selectors */ void NIO_Selector_monitor_callback(struct ev_loop *ev_loop, struct ev_io *io, int revents); #endif /* NIO4R_H */ nio4r-2.7.3/ext/libev/0000755000004100000410000000000014632135320014506 5ustar www-datawww-datanio4r-2.7.3/ext/libev/ev_epoll.c0000644000004100000410000002417514632135320016470 0ustar www-datawww-data/* * libev epoll fd activity backend * * Copyright (c) 2007,2008,2009,2010,2011,2016,2017,2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ /* * general notes about epoll: * * a) epoll silently removes fds from the fd set. as nothing tells us * that an fd has been removed otherwise, we have to continually * "rearm" fds that we suspect *might* have changed (same * problem with kqueue, but much less costly there). * b) the fact that ADD != MOD creates a lot of extra syscalls due to a) * and seems not to have any advantage. * c) the inability to handle fork or file descriptors (think dup) * limits the applicability over poll, so this is not a generic * poll replacement. * d) epoll doesn't work the same as select with many file descriptors * (such as files). while not critical, no other advanced interface * seems to share this (rather non-unixy) limitation. * e) epoll claims to be embeddable, but in practise you never get * a ready event for the epoll fd (broken: <=2.6.26, working: >=2.6.32). * f) epoll_ctl returning EPERM means the fd is always ready. * * lots of "weird code" and complication handling in this file is due * to these design problems with epoll, as we try very hard to avoid * epoll_ctl syscalls for common usage patterns and handle the breakage * ensuing from receiving events for closed and otherwise long gone * file descriptors. */ #include <sys/epoll.h> #define EV_EMASK_EPERM 0x80 static void epoll_modify (EV_P_ int fd, int oev, int nev) { struct epoll_event ev; unsigned char oldmask; /* * we handle EPOLL_CTL_DEL by ignoring it here * on the assumption that the fd is gone anyways * if that is wrong, we have to handle the spurious * event in epoll_poll. * if the fd is added again, we try to ADD it, and, if that * fails, we assume it still has the same eventmask. */ if (!nev) return; oldmask = anfds [fd].emask; anfds [fd].emask = nev; /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */ ev.data.u64 = (uint64_t)(uint32_t)fd | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); ev.events = (nev & EV_READ ? EPOLLIN : 0) | (nev & EV_WRITE ? EPOLLOUT : 0); if (ecb_expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ?
EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) return; if (ecb_expect_true (errno == ENOENT)) { /* if ENOENT then the fd went away, so try to do the right thing */ if (!nev) goto dec_egen; if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) return; } else if (ecb_expect_true (errno == EEXIST)) { /* EEXIST means we ignored a previous DEL, but the fd is still active */ /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ if (oldmask == nev) goto dec_egen; if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) return; } else if (ecb_expect_true (errno == EPERM)) { /* EPERM means the fd is always ready, but epoll is too snobbish */ /* to handle it, unlike select or poll. */ anfds [fd].emask = EV_EMASK_EPERM; /* add fd to epoll_eperms, if not already inside */ if (!(oldmask & EV_EMASK_EPERM)) { array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, array_needsize_noinit); epoll_eperms [epoll_epermcnt++] = fd; } return; } else assert (("libev: I/O watcher with invalid fd found in epoll_ctl", errno != EBADF && errno != ELOOP && errno != EINVAL)); fd_kill (EV_A_ fd); dec_egen: /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ --anfds [fd].egen; } static void epoll_poll (EV_P_ ev_tstamp timeout) { int i; int eventcnt; if (ecb_expect_false (epoll_epermcnt)) timeout = EV_TS_CONST (0.); /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */ /* the default libev max wait time, however. */ EV_RELEASE_CB; eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, EV_TS_TO_MSEC (timeout)); EV_ACQUIRE_CB; if (ecb_expect_false (eventcnt < 0)) { if (errno != EINTR) ev_syserr ("(libev) epoll_wait"); return; } for (i = 0; i < eventcnt; ++i) { struct epoll_event *ev = epoll_events + i; int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ int want = anfds [fd].events; int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); /* * check for spurious notification. * this only finds spurious notifications on egen updates * other spurious notifications will be found by epoll_ctl, below * we assume that fd is always in range, as we never shrink the anfds array */ if (ecb_expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) { /* recreate kernel state */ postfork |= 2; continue; } if (ecb_expect_false (got & ~want)) { anfds [fd].emask = want; /* * we received an event but are not interested in it, try mod or del * this often happens because we optimistically do not unregister fds * when we are no longer interested in them, but also when we get spurious * notifications for fds from another process. this is partially handled * above with the gencounter check (== our fd is not the event fd), and * partially here, when epoll_ctl returns an error (== a child has the fd * but we closed it). * note: for events such as POLLHUP, where we can't know whether it refers * to EV_READ or EV_WRITE, we might issue redundant EPOLL_CTL_MOD calls. */ ev->events = (want & EV_READ ? EPOLLIN : 0) | (want & EV_WRITE ? EPOLLOUT : 0); /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */ /* which is fortunately easy to do for us. */ if (epoll_ctl (backend_fd, want ? 
EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) { postfork |= 2; /* an error occurred, recreate kernel state */ continue; } } fd_event (EV_A_ fd, got); } /* if the receive array was full, increase its size */ if (ecb_expect_false (eventcnt == epoll_eventmax)) { ev_free (epoll_events); epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); } /* now synthesize events for all fds where epoll fails, while select works... */ for (i = epoll_epermcnt; i--; ) { int fd = epoll_eperms [i]; unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE); if (anfds [fd].emask & EV_EMASK_EPERM && events) fd_event (EV_A_ fd, events); else { epoll_eperms [i] = epoll_eperms [--epoll_epermcnt]; anfds [fd].emask = 0; } } } static int epoll_epoll_create (void) { int fd; #if defined EPOLL_CLOEXEC && !defined __ANDROID__ fd = epoll_create1 (EPOLL_CLOEXEC); if (fd < 0 && (errno == EINVAL || errno == ENOSYS)) #endif { fd = epoll_create (256); if (fd >= 0) fcntl (fd, F_SETFD, FD_CLOEXEC); } return fd; } inline_size int epoll_init (EV_P_ int flags) { if ((backend_fd = epoll_epoll_create ()) < 0) return 0; backend_mintime = EV_TS_CONST (1e-3); /* epoll does sometimes return early, this is just to avoid the worst */ backend_modify = epoll_modify; backend_poll = epoll_poll; epoll_eventmax = 64; /* initial number of events receivable per poll */ epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); return EVBACKEND_EPOLL; } inline_size void epoll_destroy (EV_P) { ev_free (epoll_events); array_free (epoll_eperm, EMPTY); } ecb_cold static void epoll_fork (EV_P) { close (backend_fd); while ((backend_fd = epoll_epoll_create ()) < 0) ev_syserr ("(libev) epoll_create"); fd_rearm_all (EV_A); } nio4r-2.7.3/ext/libev/ev_iouring.c0000644000004100000410000005131414632135320017024 0ustar www-datawww-data/* * libev linux io_uring fd activity backend * * Copyright (c) 2019-2020 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. 
If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ /* * general notes about linux io_uring: * * a) it's the best interface I have seen so far. on linux. * b) best is not necessarily very good. * c) it's better than the aio mess, doesn't suffer from the fork problems * of linux aio or epoll and so on and so on. and you could do event stuff * without any syscalls. what's not to like? * d) ok, it's vastly more complex, but that's ok, really. * e) why two mmaps instead of one? one would be more space-efficient, * and I can't see what benefit two would have (other than being * somehow resizable/relocatable, but that's apparently not possible). * f) hmm, it's practically undebuggable (gdb can't access the memory, and * the bizarre way structure offsets are communicated makes it hard to * just print the ring buffer heads, even *iff* the memory were visible * in gdb. but then, that's also ok, really. * g) well, you cannot specify a timeout when waiting for events. no, * seriously, the interface doesn't support a timeout. never seen _that_ * before. sure, you can use a timerfd, but that's another syscall * you could have avoided. overall, this bizarre omission smells * like a µ-optimisation by the io_uring author for his personal * applications, to the detriment of everybody else who just wants * an event loop. but, umm, ok, if that's all, it could be worse. * (from what I gather from the author Jens Axboe, it simply didn't * occur to him, and he made good on it by adding an unlimited nuber * of timeouts later :). * h) initially there was a hardcoded limit of 4096 outstanding events. * later versions not only bump this to 32k, but also can handle * an unlimited amount of events, so this only affects the batch size. * i) unlike linux aio, you *can* register more then the limit * of fd events. while early verisons of io_uring signalled an overflow * and you ended up getting wet. 5.5+ does not do this anymore. * j) but, oh my! it had exactly the same bugs as the linux aio backend, * where some undocumented poll combinations just fail. fortunately, * after finally reaching the author, he was more than willing to fix * this probably in 5.6+. * k) overall, the *API* itself is, I dare to say, not a total trainwreck. * once the bugs ae fixed (probably in 5.6+), it will be without * competition. */ /* TODO: use internal TIMEOUT */ /* TODO: take advantage of single mmap, NODROP etc. 
*/ /* TODO: resize cq/sq size independently */ #include <sys/timerfd.h> #include <sys/mman.h> #include <poll.h> #include <stdint.h> #define IOURING_INIT_ENTRIES 32 /*****************************************************************************/ /* syscall wrapdadoop - this section has the raw api/abi definitions */ #include <linux/fs.h> #include <linux/types.h> /* mostly directly taken from the kernel or documentation */ struct io_uring_sqe { __u8 opcode; __u8 flags; __u16 ioprio; __s32 fd; union { __u64 off; __u64 addr2; }; __u64 addr; __u32 len; union { __kernel_rwf_t rw_flags; __u32 fsync_flags; __u16 poll_events; __u32 sync_range_flags; __u32 msg_flags; __u32 timeout_flags; __u32 accept_flags; __u32 cancel_flags; __u32 open_flags; __u32 statx_flags; }; __u64 user_data; union { __u16 buf_index; __u64 __pad2[3]; }; }; struct io_uring_cqe { __u64 user_data; __s32 res; __u32 flags; }; struct io_sqring_offsets { __u32 head; __u32 tail; __u32 ring_mask; __u32 ring_entries; __u32 flags; __u32 dropped; __u32 array; __u32 resv1; __u64 resv2; }; struct io_cqring_offsets { __u32 head; __u32 tail; __u32 ring_mask; __u32 ring_entries; __u32 overflow; __u32 cqes; __u64 resv[2]; }; struct io_uring_params { __u32 sq_entries; __u32 cq_entries; __u32 flags; __u32 sq_thread_cpu; __u32 sq_thread_idle; __u32 features; __u32 resv[4]; struct io_sqring_offsets sq_off; struct io_cqring_offsets cq_off; }; #define IORING_SETUP_CQSIZE 0x00000008 #define IORING_OP_POLL_ADD 6 #define IORING_OP_POLL_REMOVE 7 #define IORING_OP_TIMEOUT 11 #define IORING_OP_TIMEOUT_REMOVE 12 /* relative or absolute, reference clock is CLOCK_MONOTONIC */ struct iouring_kernel_timespec { int64_t tv_sec; long long tv_nsec; }; #define IORING_TIMEOUT_ABS 0x00000001 #define IORING_ENTER_GETEVENTS 0x01 #define IORING_OFF_SQ_RING 0x00000000ULL #define IORING_OFF_CQ_RING 0x08000000ULL #define IORING_OFF_SQES 0x10000000ULL #define IORING_FEAT_SINGLE_MMAP 0x00000001 #define IORING_FEAT_NODROP 0x00000002 #define IORING_FEAT_SUBMIT_STABLE 0x00000004 inline_size int evsys_io_uring_setup (unsigned entries, struct io_uring_params *params) { return ev_syscall2 (SYS_io_uring_setup, entries, params); } inline_size int evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz) { return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz); } /*****************************************************************************/ /* actual backend implementation */ /* we hope that volatile will make the compiler access these variables only once */ #define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_ ## name) #define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_cq_ring + iouring_cq_ ## name) /* the index array */ #define EV_SQ_ARRAY ((unsigned *)((char *)iouring_sq_ring + iouring_sq_array)) /* the submit/completion queue entries */ #define EV_SQES ((struct io_uring_sqe *) iouring_sqes) #define EV_CQES ((struct io_uring_cqe *)((char *)iouring_cq_ring + iouring_cq_cqes)) inline_speed int iouring_enter (EV_P_ ev_tstamp timeout) { int res; EV_RELEASE_CB; res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1, timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0); assert (("libev: io_uring_enter did not consume all sqes", (res < 0 || res == iouring_to_submit))); iouring_to_submit = 0; EV_ACQUIRE_CB; return res; } /* TODO: can we move things around so we don't need this forward-reference?
*/ static void iouring_poll (EV_P_ ev_tstamp timeout); static struct io_uring_sqe * iouring_sqe_get (EV_P) { unsigned tail; for (;;) { tail = EV_SQ_VAR (tail); if (ecb_expect_true (tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries))) break; /* whats the problem, we have free sqes */ /* queue full, need to flush and possibly handle some events */ #if EV_FEATURE_CODE /* first we ask the kernel nicely, most often this frees up some sqes */ int res = iouring_enter (EV_A_ EV_TS_CONST (0.)); ECB_MEMORY_FENCE_ACQUIRE; /* better safe than sorry */ if (res >= 0) continue; /* yes, it worked, try again */ #endif /* some problem, possibly EBUSY - do the full poll and let it handle any issues */ iouring_poll (EV_A_ EV_TS_CONST (0.)); /* iouring_poll should have done ECB_MEMORY_FENCE_ACQUIRE for us */ } /*assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));*/ return EV_SQES + (tail & EV_SQ_VAR (ring_mask)); } inline_size void iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe) { unsigned idx = sqe - EV_SQES; EV_SQ_ARRAY [idx] = idx; ECB_MEMORY_FENCE_RELEASE; ++EV_SQ_VAR (tail); /*ECB_MEMORY_FENCE_RELEASE; /* for the time being we assume this is not needed */ ++iouring_to_submit; } /*****************************************************************************/ /* when the timerfd expires we simply note the fact, * as the purpose of the timerfd is to wake us up, nothing else. * the next iteration should re-set it. */ static void iouring_tfd_cb (EV_P_ struct ev_io *w, int revents) { iouring_tfd_to = EV_TSTAMP_HUGE; } /* called for full and partial cleanup */ ecb_cold static void iouring_internal_destroy (EV_P) { close (iouring_tfd); close (iouring_fd); if (iouring_sq_ring != MAP_FAILED) munmap (iouring_sq_ring, iouring_sq_ring_size); if (iouring_cq_ring != MAP_FAILED) munmap (iouring_cq_ring, iouring_cq_ring_size); if (iouring_sqes != MAP_FAILED) munmap (iouring_sqes , iouring_sqes_size ); if (ev_is_active (&iouring_tfd_w)) { ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_tfd_w); } } ecb_cold static int iouring_internal_init (EV_P) { struct io_uring_params params = { 0 }; iouring_to_submit = 0; iouring_tfd = -1; iouring_sq_ring = MAP_FAILED; iouring_cq_ring = MAP_FAILED; iouring_sqes = MAP_FAILED; if (!have_monotonic) /* cannot really happen, but what if11 */ return -1; for (;;) { iouring_fd = evsys_io_uring_setup (iouring_entries, ¶ms); if (iouring_fd >= 0) break; /* yippie */ if (errno != EINVAL) return -1; /* we failed */ #if TODO if ((~params.features) & (IORING_FEAT_NODROP | IORING_FEATURE_SINGLE_MMAP | IORING_FEAT_SUBMIT_STABLE)) return -1; /* we require the above features */ #endif /* EINVAL: lots of possible reasons, but maybe * it is because we hit the unqueryable hardcoded size limit */ /* we hit the limit already, give up */ if (iouring_max_entries) return -1; /* first time we hit EINVAL? 
assume we hit the limit, so go back and retry */ iouring_entries >>= 1; iouring_max_entries = iouring_entries; } iouring_sq_ring_size = params.sq_off.array + params.sq_entries * sizeof (unsigned); iouring_cq_ring_size = params.cq_off.cqes + params.cq_entries * sizeof (struct io_uring_cqe); iouring_sqes_size = params.sq_entries * sizeof (struct io_uring_sqe); iouring_sq_ring = mmap (0, iouring_sq_ring_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING); iouring_cq_ring = mmap (0, iouring_cq_ring_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_CQ_RING); iouring_sqes = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES); if (iouring_sq_ring == MAP_FAILED || iouring_cq_ring == MAP_FAILED || iouring_sqes == MAP_FAILED) return -1; iouring_sq_head = params.sq_off.head; iouring_sq_tail = params.sq_off.tail; iouring_sq_ring_mask = params.sq_off.ring_mask; iouring_sq_ring_entries = params.sq_off.ring_entries; iouring_sq_flags = params.sq_off.flags; iouring_sq_dropped = params.sq_off.dropped; iouring_sq_array = params.sq_off.array; iouring_cq_head = params.cq_off.head; iouring_cq_tail = params.cq_off.tail; iouring_cq_ring_mask = params.cq_off.ring_mask; iouring_cq_ring_entries = params.cq_off.ring_entries; iouring_cq_overflow = params.cq_off.overflow; iouring_cq_cqes = params.cq_off.cqes; iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC); if (iouring_tfd < 0) return iouring_tfd; iouring_tfd_to = EV_TSTAMP_HUGE; return 0; } ecb_cold static void iouring_fork (EV_P) { iouring_internal_destroy (EV_A); while (iouring_internal_init (EV_A) < 0) ev_syserr ("(libev) io_uring_setup"); fd_rearm_all (EV_A); ev_io_stop (EV_A_ &iouring_tfd_w); ev_io_set (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ); ev_io_start (EV_A_ &iouring_tfd_w); } /*****************************************************************************/ static void iouring_modify (EV_P_ int fd, int oev, int nev) { if (oev) { /* we assume the sqe's are all "properly" initialised */ struct io_uring_sqe *sqe = iouring_sqe_get (EV_A); sqe->opcode = IORING_OP_POLL_REMOVE; sqe->fd = fd; /* Jens Axboe notified me that user_data is not what is documented, but is * some kind of unique ID that has to match, otherwise the request cannot * be removed. Since we don't *really* have that, we pass in the old * generation counter - if that fails, too bad, it will hopefully be removed * at close time and then be ignored. */ sqe->addr = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32); sqe->user_data = (uint64_t)-1; iouring_sqe_submit (EV_A_ sqe); /* increment generation counter to avoid handling old events */ ++anfds [fd].egen; } if (nev) { struct io_uring_sqe *sqe = iouring_sqe_get (EV_A); sqe->opcode = IORING_OP_POLL_ADD; sqe->fd = fd; sqe->addr = 0; sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32); sqe->poll_events = (nev & EV_READ ? POLLIN : 0) | (nev & EV_WRITE ? POLLOUT : 0); iouring_sqe_submit (EV_A_ sqe); } } inline_size void iouring_tfd_update (EV_P_ ev_tstamp timeout) { ev_tstamp tfd_to = mn_now + timeout; /* we assume there will be many iterations per timer change, so * we only re-set the timerfd when we have to because its expiry * is too late. 
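* for example: with mn_now == 100. and a 5. second timeout we arm the timerfd * for 105. and remember that in iouring_tfd_to; a later 7. second timeout * (expiry 107.) is already covered and costs no syscall, while a 2. second * timeout (expiry 102.) is earlier and forces the timerfd_settime below.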
*/ if (ecb_expect_false (tfd_to < iouring_tfd_to)) { struct itimerspec its; iouring_tfd_to = tfd_to; EV_TS_SET (its.it_interval, 0.); EV_TS_SET (its.it_value, tfd_to); if (timerfd_settime (iouring_tfd, TFD_TIMER_ABSTIME, &its, 0) < 0) assert (("libev: iouring timerfd_settime failed", 0)); } } inline_size void iouring_process_cqe (EV_P_ struct io_uring_cqe *cqe) { int fd = cqe->user_data & 0xffffffffU; uint32_t gen = cqe->user_data >> 32; int res = cqe->res; /* user_data -1 is a remove that we are not currently interested in */ if (cqe->user_data == (uint64_t)-1) return; assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax)); /* the documentation lies, of course. the result value is NOT like * normal syscalls, but like linux raw syscalls, i.e. negative * error numbers. fortunate, as otherwise there would be no way * to get error codes at all. still, why not document this? */ /* ignore event if generation doesn't match */ /* other than skipping removal events, */ /* this should actually be very rare */ if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen)) return; if (ecb_expect_false (res < 0)) { /*TODO: EINVAL handling (was something failed with this fd)*/ if (res == -EBADF) { assert (("libev: event loop rejected bad fd", res != -EBADF)); fd_kill (EV_A_ fd); } else { errno = -res; ev_syserr ("(libev) IORING_OP_POLL_ADD"); } return; } /* feed events, we do not expect or handle POLLNVAL */ fd_event ( EV_A_ fd, (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) ); /* io_uring is oneshot, so we need to re-arm the fd next iteration */ /* this also means we usually have to do at least one syscall per iteration */ anfds [fd].events = 0; fd_change (EV_A_ fd, EV_ANFD_REIFY); } /* called when the event queue overflows */ ecb_cold static void iouring_overflow (EV_P) { /* we have two options: resize the queue (by tearing down * everything and recreating it), or live with it * and poll manually. * we implement this by resizing the queue, and, if that fails, * we just recreate the state on every failure, which * kind of is a very inefficient poll. * one danger is, due to the bias toward lower fds, * we will only really get events for those, so * maybe we need a poll() fallback, after all. */ /*EV_CQ_VAR (overflow) = 0;*/ /* need to do this if we keep the state and poll manually */ fd_rearm_all (EV_A); /* we double the size until we hit the hard-to-probe maximum */ if (!iouring_max_entries) { iouring_entries <<= 1; iouring_fork (EV_A); } else { /* we hit the kernel limit, we should fall back to something else. * we can either poll() a few times and hope for the best, * poll always, or switch to epoll. * TODO: is this necessary with newer kernels? */ iouring_internal_destroy (EV_A); /* this should make it so that on return, we don't call any uring functions */ iouring_to_submit = 0; for (;;) { backend = epoll_init (EV_A_ 0); if (backend) break; ev_syserr ("(libev) iouring switch to epoll"); } } } /* handle any events in the completion queue, return true if there were any */ static int iouring_handle_cq (EV_P) { unsigned head, tail, mask; head = EV_CQ_VAR (head); ECB_MEMORY_FENCE_ACQUIRE; tail = EV_CQ_VAR (tail); if (head == tail) return 0; /* it can only overflow if we have events, yes, yes?
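* (EV_CQ_VAR (overflow) reads the kernel's dropped-completion counter at * io_cqring_offsets.overflow - kernels without IORING_FEAT_NODROP increment * it when they have to throw away a cqe because the completion ring is full.)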
*/ if (ecb_expect_false (EV_CQ_VAR (overflow))) { iouring_overflow (EV_A); return 1; } mask = EV_CQ_VAR (ring_mask); do iouring_process_cqe (EV_A_ &EV_CQES [head++ & mask]); while (head != tail); EV_CQ_VAR (head) = head; ECB_MEMORY_FENCE_RELEASE; return 1; } static void iouring_poll (EV_P_ ev_tstamp timeout) { /* if we have events, no need for extra syscalls, but we might have to queue events */ /* we also clear the timeout if there are outstanding fdchanges */ /* the latter should only happen if both the sq and cq are full, most likely */ /* because we have a lot of event sources that immediately complete */ /* TODO: fdchangecnt is always 0 because fd_reify does not have two buffers yet */ if (iouring_handle_cq (EV_A) || fdchangecnt) timeout = EV_TS_CONST (0.); else /* no events, so maybe wait for some */ iouring_tfd_update (EV_A_ timeout); /* only enter the kernel if we have something to submit, or we need to wait */ if (timeout || iouring_to_submit) { int res = iouring_enter (EV_A_ timeout); if (ecb_expect_false (res < 0)) if (errno == EINTR) /* ignore */; else if (errno == EBUSY) /* cq full, cannot submit - should be rare because we flush the cq first, so simply ignore */; else ev_syserr ("(libev) iouring setup"); else iouring_handle_cq (EV_A); } } inline_size int iouring_init (EV_P_ int flags) { iouring_entries = IOURING_INIT_ENTRIES; iouring_max_entries = 0; if (iouring_internal_init (EV_A) < 0) { iouring_internal_destroy (EV_A); return 0; } ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ); ev_set_priority (&iouring_tfd_w, EV_MINPRI); ev_io_start (EV_A_ &iouring_tfd_w); ev_unref (EV_A); /* watcher should not keep loop alive */ backend_modify = iouring_modify; backend_poll = iouring_poll; return EVBACKEND_IOURING; } inline_size void iouring_destroy (EV_P) { iouring_internal_destroy (EV_A); } nio4r-2.7.3/ext/libev/Changes0000644000004100000410000007750314632135320016005 0ustar www-datawww-dataRevision history for libev, a high-performance and full-featured event loop. TODO: for next ABI/API change, consider moving EV__IOFDSSET into io->fd instead and provide a getter. TODO: document EV_TSTAMP_T 4.33 Wed Mar 18 13:22:29 CET 2020 - no changes w.r.t. 4.32. 4.32 (EV only) - the 4.31 timerfd code wrongly changed the priority of the signal fd watcher, which is usually harmless unless signal fds are also used (found via cpan tester service). - the documentation wrongly claimed that the user may modify fd and events members in io watchers when the watcher was stopped (found by b_jonas). - new ev_io_modify mutator which changes only the events member, which can be faster. also added ev::io::set (int events) method to ev++.h. - officially allow a zero events mask for io watchers. this should work with older libev versions as well but was not officially allowed before. - do not wake up every minute when timerfd is used to detect timejumps. - do not wake up every minute when periodics are disabled and we have a monotonic clock. - support a lot more "uncommon" compile time configurations, such as ev_embed enabled but ev_timer disabled. - use a start/stop wrapper class to reduce code duplication in ev++.h and make it needlessly more c++-y. - the linux aio backend is no longer compiled in by default. - update to libecb version 0x00010008. 4.31 Fri Dec 20 21:58:29 CET 2019 - handle backends with minimum wait time a bit better by not waiting in the presence of already-expired timers (behaviour reported by Felipe Gasper).
- new feature: use timerfd to detect timejumps quickly, can be disabled with the new EVFLAG_NOTIMERFD loop flag. - document EV_USE_SIGNALFD feature macro. 4.30 (EV only) - change non-autoconf test for __kernel_rwf_t by testing LINUX_VERSION_CODE, the most direct test I could find. - fix a bug in the io_uring backend that polled the wrong backend fd, causing it to not work in many cases. 4.29 (EV only) - add io uring autoconf and non-autoconf detection. - disable io_uring when some header files are too old. 4.28 (EV only) - linuxaio backend resulted in random memory corruption when loop is forked. - linuxaio backend might have tried to cancel an iocb multiple times (was unable to trigger this). - linuxaio backend now employs a generation counter to avoid handling spurious events from cancelled requests. - io_cancel can return EINTR, deal with it. also, assume io_submit also returns EINTR. - fix some other minor bugs in linuxaio backend. - ev_tstamp type can now be overridden by defining EV_TSTAMP_T. - cleanup: replace expect_true/false and noinline by their libecb counterparts. - move syscall infrastructure from ev_linuxaio.c to ev.c. - prepare io_uring integration. - tweak ev_floor. - epoll, poll, win32 Sleep and other places that use millisecond resolution now all try to round up times. - solaris port backend didn't compile. - abstract time constants into their macros, for more flexibility. 4.27 Thu Jun 27 22:43:44 CEST 2019 - linux aio backend almost completely rewritten to work around its limitations. - linux aio backend now requires linux 4.19+. - epoll backend now mandatory for linux aio backend. - fail assertions more aggressively on invalid fd's detected in the event loop, do not just silently fd_kill in case of user error. - ev_io_start/ev_io_stop now verify the watcher fd using a syscall when EV_VERIFY is 2 or higher. 4.26 (EV only) - update to libecb 0x00010006. - new experimental linux aio backend (linux 4.18+). - removed redundant 0-ptr check in ev_once. - updated/extended ev_set_allocator documentation. - replaced EMPTY2 macro by array_needsize_noinit. - minor code cleanups. - epoll backend now uses epoll_create1 also after fork. 4.25 Fri Dec 21 07:49:20 CET 2018 - INCOMPATIBLE CHANGE: EV_THROW was renamed to EV_NOEXCEPT (EV_THROW still provided) and now uses noexcept on C++11 or newer. - move the darwin select workaround higher in ev.c, as newer versions of darwin managed to break their broken select even more. - ANDROID => __ANDROID__ (reported by enh@google.com). - disable epoll_create1 on android because it has broken header files and google is unwilling to fix them (reported by enh@google.com). - avoid a minor compilation warning on win32. - c++: remove deprecated dynamic throw() specifications. - c++: improve the (unsupported) bad_loop exception class. - backport perl ev_periodic example to C, untested. - update libecb, biggest change is to include a memory fence in ECB_MEMORY_FENCE_RELEASE on x86/amd64. - minor autoconf/automake modernisation. 4.24 Wed Dec 28 05:19:55 CET 2016 - bump version to 4.24, as the release tarball inexplicably didn't have the right version in ev.h, even though the cvs-tagged version did have the right one (reported by Ales Teska). 4.23 Wed Nov 16 18:23:41 CET 2016 - move some declarations at the beginning to help certain retarded microsoft compilers, even though their documentation claims otherwise (reported by Ruslan Osmanov).
4.22 Sun Dec 20 22:11:50 CET 2015 - when epoll detects unremovable fds in the fd set, rebuild only the epoll descriptor, not the signal pipe, to avoid SIGPIPE in ev_async_send. This doesn't solve it on fork, so document what needs to be done in ev_loop_fork (analyzed by Benjamin Mahler). - remove superfluous sys/timeb.h include on win32 (analyzed by Jason Madden). - updated libecb. 4.20 Sat Jun 20 13:01:43 CEST 2015 - prefer noexcept over throw () with C++ 11. - update ecb.h due to incompatibilities with c11. - fix a potential aliasing issue when reading and writing watcher callbacks. 4.19 Thu Sep 25 08:18:25 CEST 2014 - ev.h wasn't valid C++ anymore, which tripped compilers other than clang, msvc or gcc (analyzed by Raphael 'kena' Poss). Unfortunately, C++ doesn't support typedefs for function pointers fully, so the affected declarations have to spell out the types each time. - when not using autoconf, tighten the check for clock_gettime and related functionality. 4.18 Fri Sep 5 17:55:26 CEST 2014 - events on files were not always generated properly with the epoll backend (testcase by Assaf Inbal). - mark event pipe fd as cloexec after a fork (analyzed by Sami Farin). - (ecb) support m68k, m88k and sh (patch by Miod Vallat). - use a reasonable fallback for EV_NSIG instead of erroring out when we can't detect the signal set size. - in the absence of autoconf, do not use the clock syscall on glibc >= 2.17 (avoids the syscall AND -lrt on systems doing clock_gettime in userspace). - ensure extern "C" function pointers are used for externally-visible loop callbacks (not watcher callbacks yet). - (ecb) work around memory barriers and volatile apparently both being broken in visual studio 2008 and later (analysed and patch by Nicolas Noble). 4.15 Fri Mar 1 12:04:50 CET 2013 - destroying a non-default loop would stop the global waitpid watcher (Denis Bilenko). - queueing pending watchers of higher priority from a watcher now invokes them in a timely fashion (reported by Denis Bilenko). - add throw() to all libev functions that cannot throw exceptions, for further code size decrease when compiling for C++. - add throw () to callbacks that must not throw exceptions (allocator, syserr, loop acquire/release, periodic reschedule cbs). - fix event_base_loop return code, add event_get_callback, event_base_new, event_base_get_method calls to improve libevent 1.x emulation and add some libevent 2.x functionality (based on a patch by Jeff Davey). - add more memory fences to fix a bug reported by Jeff Davey. Better be overfenced than underprotected. - ev_run now returns a boolean status (true meaning watchers are still active). - ev_once: undef EV_ERROR in ev_kqueue.c, to avoid clashing with libev's EV_ERROR (reported by 191919). - (ecb) add memory fence support for xlC (Darin McBride). - (ecb) add memory fence support for gcc-mips (Anton Kirilov). - (ecb) add memory fence support for gcc-alpha (Christian Weisgerber). - work around some kernels losing file descriptors by leaking the kqueue descriptor in the child. - work around linux inotify not reporting IN_ATTRIB changes for directories in many cases. - include sys/syscall.h instead of plain syscall.h. - check for io watcher loops in ev_verify, check for the most common reported usage bug in ev_io_start. - choose socket vs. WSASocket at compiletime using EV_USE_WSASOCKET. - always use WSASend/WSARecv directly on windows, hoping that this works in all cases (unlike read/write/send/recv...). 
- try to detect signals around a fork faster (test program by Denis Bilenko). - work around recent glibc versions that leak memory in realloc. - rename ev::embed::set to ev::embed::set_embed to avoid clashing the watcher base set (loop) method. - rewrite the async/signal pipe logic to always keep a valid fd, which simplifies (and hopefully correctifies :) the race checking on fork, at the cost of one extra fd. - add fat, msdos, jffs2, ramfs, ntfs and btrfs to the list of inotify-supporting filesystems. - move orig_CFLAGS assignment to after AC_INIT, as newer autoconf versions ignore it before (https://bugzilla.redhat.com/show_bug.cgi?id=908096). - add some untested android support. - enum expressions must be of type int (reported by Juan Pablo L). 4.11 Sat Feb 4 19:52:39 CET 2012 - INCOMPATIBLE CHANGE: ev_timer_again now clears the pending status, as was documented already, but not implemented in the repeating case. - new compiletime symbols: EV_NO_SMP and EV_NO_THREADS. - fix a race where the workaround against the epoll fork bugs caused signals to not be handled anymore. - correct backend_fudge for most backends, and implement a windows specific workaround to avoid looping because we call both select and Sleep, both with different time resolutions. - document range and guarantees of ev_sleep. - document reasonable ranges for periodics interval and offset. - rename backend_fudge to backend_mintime to avoid future confusion :) - change the default periodic reschedule function to hopefully be more exact and correct even in corner cases or in the far future. - do not rely on -lm anymore: use it when available but use our own floor () if it is missing. This should make it easier to embed, as no external libraries are required. - strategically import macros from libecb and mark rarely-used functions as cache-cold (saving almost 2k code size on typical amd64 setups). - add Symbols.ev and Symbols.event files, that were missing. - fix backend_mintime value for epoll (was 1/1024, is 1/1000 now). - fix #3 "be smart about timeouts" to not "deadlock" when timeout == now, also improve the section overall. - avoid "AVOIDING FINISHING BEFORE RETURNING" idiom. - support new EV_API_STATIC mode to make all libev symbols static. - supply default CFLAGS of -g -O3 with gcc when original CFLAGS were empty. 4.04 Wed Feb 16 09:01:51 CET 2011 - fix two problems in the native win32 backend, where reuse of fd's with different underlying handles caused handles not to be removed or added to the select set (analyzed and tested by Bert Belder). - do no rely on ceil() in ev_e?poll.c. - backport libev to HP-UX versions before 11 v3. - configure did not detect nanosleep and clock_gettime properly when they are available in the libc (as opposed to -lrt). 4.03 Tue Jan 11 14:37:25 CET 2011 - officially support polling files with all backends. - support files, /dev/zero etc. the same way as select in the epoll backend, by generating events on our own. - ports backend: work around solaris bug 6874410 and many related ones (EINTR, maybe more), with no performance loss (note that the solaris bug report is actually wrong, reality is far more bizarre and broken than that). - define EV_READ/EV_WRITE as macros in event.h, as some programs use #ifdef to test for them. - new (experimental) function: ev_feed_signal. - new (to become default) EVFLAG_NOSIGMASK flag. - new EVBACKEND_MASK symbol. - updated COMMON IDIOMS SECTION. 
4.01 Fri Nov 5 21:51:29 CET 2010 - automake fucked it up, apparently, --add-missing -f is not quite enough to make it update its files, so 4.00 didn't install ev++.h and event.h on make install. grrr. - ev_loop(count|depth) didn't return anything (Robin Haberkorn). - change EV_UNDEF to 0xffffffff to silence some overzealous compilers. - use "(libev) " prefix for all libev error messages now. 4.00 Mon Oct 25 12:32:12 CEST 2010 - "PORTING FROM LIBEV 3.X TO 4.X" (in ev.pod) is recommended reading. - ev_embed_stop did not correctly stop the watcher (very good testcase by Vladimir Timofeev). - ev_run will now always update the current loop time - it erroneously didn't when idle watchers were active, causing timers not to fire. - fix a bug where a timeout of zero caused the timer not to fire in the libevent emulation (testcase by Péter Szabó). - applied win32 fixes by Michael Lenaghan (also James Mansion). - replace EV_MINIMAL by EV_FEATURES. - prefer EPOLL_CTL_ADD over EPOLL_CTL_MOD in some more cases, as it seems the former is *much* faster than the latter. - linux kernel version detection (for inotify bug workarounds) did not work properly. - reduce the number of spurious wake-ups with the ports backend. - remove dependency on sys/queue.h on freebsd (patch by Vanilla Hsu). - do async init within ev_async_start, not ev_async_set, which avoids an API quirk where the set function must be called in the C++ API even when there is nothing to set. - add (undocumented) EV_ENABLE when adding events with kqueue, this might help with OS X, which seems to need it despite documenting not to need it (helpfully pointed out by Tilghman Lesher). - do not use poll by default on freebsd, it's broken (what isn't on freebsd...). - allow to embed epoll on kernels >= 2.6.32. - configure now prepends -O3, not appends it, so one can still override it. - ev.pod: greatly expanded the portability section, added a porting section, a description of watcher states and made lots of minor fixes. - disable poll backend on AIX, the poll header spams the namespace and it's not worth working around dead platforms (reported and analyzed by Aivars Kalvans). - improve header file compatibility of the standalone eventfd code in an obscure case. - implement EV_AVOID_STDIO option. - do not use sscanf to parse linux version number (smaller, faster, no sscanf dependency). - new EV_CHILD_ENABLE and EV_SIGNAL_ENABLE configurable settings. - update libev.m4 HAVE_CLOCK_SYSCALL test for newer glibcs. - add section on accept() problems to the manpage. - rename EV_TIMEOUT to EV_TIMER. - rename ev_loop_count/depth/verify/loop/unloop. - remove ev_default_destroy and ev_default_fork. - switch to two-digit minor version. - work around an apparent gentoo compiler bug. - define _DARWIN_UNLIMITED_SELECT. just so. - use enum instead of #define for most constants. - improve compatibility to older C++ compilers. - (experimental) ev_run/ev_default_loop/ev_break/ev_loop_new have now default arguments when compiled as C++. - enable automake dependency tracking. - ev_loop_new no longer leaks memory when loop creation failed. - new ev_cleanup watcher type. 3.9 Thu Dec 31 07:59:59 CET 2009 - signalfd is no longer used by default and has to be requested explicitly - this means that easy to catch bugs become hard to catch race conditions, but the users have spoken. - point out the unspecified signal mask in the documentation, and that this is a race condition regardless of EV_SIGNALFD. - backport inotify code to C89. 
- inotify file descriptors could leak into child processes. - ev_stat watchers could keep an erroneous extra ref on the loop, preventing exit when unregistering all watchers (testcases provided by ry@tinyclouds.org). - implement EV_WIN32_HANDLE_TO_FD and EV_WIN32_CLOSE_FD configuration symbols to make it easier for apps to do their own fd management. - support EV_IDLE_ENABLE being disabled in ev++.h (patch by Didier Spezia). - take advantage of inotify_init1, if available, to set cloexec/nonblock on fd creation, to avoid races. - the signal handling pipe wasn't always initialised under windows (analysed by lekma). - changed minimum glibc requirement from glibc 2.9 to 2.7, for signalfd. - add missing string.h include (Denis F. Latypoff). - only replace ev_stat.prev when we detect an actual difference, so prev is (almost) always different to attr. this might have caused the problems with 04_stat.t. - add ev::timer->remaining () method to C++ API. 3.8 Sun Aug 9 14:30:45 CEST 2009 - incompatible change: do not necessarily reset signal handler to SIG_DFL when a sighandler is stopped. - ev_default_destroy did not properly free or zero some members, potentially causing crashes and memory corruption on repeated ev_default_destroy/ev_default_loop calls. - take advantage of signalfd on GNU/Linux systems. - document that the signal mask might be in an unspecified state when using libev's signal handling. - take advantage of some GNU/Linux calls to set cloexec/nonblock on fd creation, to avoid race conditions. 3.7 Fri Jul 17 16:36:32 CEST 2009 - ev_unloop and ev_loop wrongly used a global variable to exit loops, instead of using a per-loop variable (bug caught by accident...). - the ev_set_io_collect_interval interpretation has changed. - add new functionality: ev_set_userdata, ev_userdata, ev_set_invoke_pending_cb, ev_set_loop_release_cb, ev_invoke_pending, ev_pending_count, together with a long example about thread locking. - add ev_timer_remaining (as requested by Denis F. Latypoff). - add ev_loop_depth. - calling ev_unloop in fork/prepare watchers will no longer poll for new events. - Denis F. Latypoff corrected many typos in example code snippets. - honor autoconf detection of EV_USE_CLOCK_SYSCALL, also double- check that the syscall number is available before trying to use it (reported by ry@tinyclouds). - use GetSystemTimeAsFileTime instead of _timeb on windows, for slightly higher accuracy. - properly declare ev_loop_verify and ev_now_update even when !EV_MULTIPLICITY. - do not compile in any priority code when EV_MAXPRI == EV_MINPRI. - support EV_MINIMAL==2 for a reduced API. - actually 0-initialise struct sigaction when installing signals. - add section on hibernate and stopped processes to ev_timer docs. 3.6 Tue Apr 28 02:49:30 CEST 2009 - multiple timers becoming ready within an event loop iteration will be invoked in the "correct" order now. - do not leave the event loop early just because we have no active watchers, fixing a problem when embedding a kqueue loop that has active kernel events but no registered watchers (reported by blacksand blacksand). - correctly zero the idx values for arrays, so destroying and reinitialising the default loop actually works (patch by Malek Hadj-Ali). - implement ev_suspend and ev_resume. - new EV_CUSTOM revents flag for use by applications. - add documentation section about priorities. - add a glossary to the documentation. - extend the ev_fork description slightly. - optimize a jump out of call_pending. 
3.53 Sun Feb 15 02:38:20 CET 2009 - fix a bug in event pipe creation on win32 that would cause a failed assertion on event loop creation (patch by Malek Hadj-Ali). - probe for CLOCK_REALTIME support at runtime as well and fall back to gettimeofday if there is an error, to support older operating systems with newer header files/libraries. - prefer gettimeofday over clock_gettime with USE_CLOCK_SYSCALL (default most everywhere), otherwise not. 3.52 Wed Jan 7 21:43:02 CET 2009 - fix compilation of select backend in fd_set mode when NFDBITS is missing (to get it to compile on QNX, reported by Rodrigo Campos). - better select-nfds handling when select backend is in fd_set mode. - diagnose fd_set overruns when select backend is in fd_set mode. - due to a thinko, instead of disabling everything but select on the borked OS X platform, everything but select was allowed (reported by Emanuele Giaquinta). - actually verify that local and remote port are matching in libev's socketpair emulation, which makes denial-of-service attacks harder (but not impossible - it's windows). Make sure it even works under vista, which thinks that getpeer/sockname should return fantasy port numbers. - include "libev" in all assertion messages for potentially clearer diagnostics. - event_get_version (libevent compatibility) returned a useless string instead of the expected version string (patch by W.C.A. Wijngaards). 3.51 Wed Dec 24 23:00:11 CET 2008 - fix a bug where an inotify watcher was added twice, causing freezes on hash collisions (reported and analysed by Graham Leggett). - new config symbol, EV_USE_CLOCK_SYSCALL, to make libev use a direct syscall - slower, but no dependency on librt et al. - assume negative return values != -1 signals success of port_getn (http://cvs.epicsol.org/cgi/viewcvs.cgi/epic5/source/newio.c?rev=1.52) (no known failure reports, but it doesn't hurt). - fork detection in ev_embed now stops and restarts the watcher automatically. - EXPERIMENTAL: default the method to operator () in ev++.h, to make it nicer to use functors (requested by Benedek László). - fixed const object callbacks in ev++.h. - replaced loop_ref argument of watcher.set (loop) by a direct ev_loop * in ev++.h, to avoid clashes with functor patch. - do not try to watch the empty string via inotify. - inotify watchers could be leaked under certain circumstances. - OS X 10.5 is actually even more broken than earlier versions, so fall back to select on that piece of garbage. - fixed some weirdness in the ev_embed documentation. 3.49 Wed Nov 19 11:26:53 CET 2008 - ev_stat watchers will now use inotify as a mere hint on kernels <2.6.25, or if the filesystem is not in the "known to be good" list. - better mingw32 compatibility (it's not as borked as native win32) (analysed by Roger Pack). - include stdio.h in the example program, as too many people are confused by the weird C language otherwise. I guess the next thing I get told is that the "..." ellipses in the examples don't compile with their C compiler. 3.48 Thu Oct 30 09:02:37 CET 2008 - further optimise away the EPOLL_CTL_ADD/MOD combo in the epoll backend by assuming the kernel event mask hasn't changed if ADD fails with EEXIST. - work around spurious event notification bugs in epoll by using a 32-bit generation counter. recreate kernel state if we receive spurious notifications or unwanted events. this is very costly, but I didn't come up with this horrible design. - use memset to initialise most arrays now and do away with the init functions. 
- expand time-out strategies into a "Be smart about timeouts" section. - drop the "struct" from all ev_watcher declarations in the documentation and did other clarifications (yeah, it was a mistake to have a struct AND a function called ev_loop). - fix a bug where ev_default would not initialise the default loop again after it was destroyed with ev_default_destroy. - rename syserr to ev_syserr to avoid name clashes when embedding, do similar changes for event.c. 3.45 Tue Oct 21 21:59:26 CEST 2008 - disable inotify usage on linux <2.6.25, as it is broken (reported by Yoann Vandoorselaere). - ev_stat erroneously would try to add inotify watchers even when inotify wasn't available (this should only have a performance impact). - ev_once now passes both timeout and io to the callback if both occur concurrently, instead of giving timeouts precedence. - disable EV_USE_INOTIFY when sys/inotify.h is too old. 3.44 Mon Sep 29 05:18:39 CEST 2008 - embed watchers now automatically invoke ev_loop_fork on the embedded loop when the parent loop forks. - new function: ev_now_update (loop). - verify_watcher was not marked static. - improve the "associating..." manpage section. - documentation tweaks here and there. 3.43 Sun Jul 6 05:34:41 CEST 2008 - include more include files on windows to get struct _stati64 (reported by Chris Hulbert, but doesn't quite fix his issue). - add missing #include in ev.c on windows (reported by Matt Tolton). 3.42 Tue Jun 17 12:12:07 CEST 2008 - work around yet another windows bug: FD_SET actually adds fd's multiple times to the fd_*SET*, despite official MSN docs claiming otherwise. Reported and well-analysed by Matt Tolton. - define NFDBITS to 0 when EV_SELECT_IS_WINSOCKET to make it compile (reported and analysed by Chris Hulbert). - fix a bug in ev_ebadf (this function is only used to catch programming errors in the libev user). reported by Matt Tolton. - fix a bug in fd_intern on win32 (could lead to compile errors under some circumstances, but would work correctly if it compiles). reported by Matt Tolton. - (try to) work around missing lstat on windows. - pass in the write fd set as except fd set under windows. windows is so uncontrollably lame that it requires this. this means that switching off oobinline is not supported (but tcp/ip doesn't have oob, so that would be stupid anyways). - use posix module symbol to auto-detect monotonic clock presence and some other default values. 3.41 Fri May 23 18:42:54 CEST 2008 - work around an obscure bug in winsocket select: if you provide only empty fd sets then select returns WSAEINVAL. how sucky. - improve timer scheduling stability and reduce use of time_epsilon. - use 1-based 2-heap for EV_MINIMAL, simplifies code, reduces codesize and makes for better cache-efficiency. - use 3-based 4-heap for !EV_MINIMAL. this makes better use of cpu cache lines and gives better growth behaviour than 2-based heaps. - cache timestamp within heap for !EV_MINIMAL, to avoid random memory accesses. - document/add EV_USE_4HEAP and EV_HEAP_CACHE_AT. - fix a potential aliasing issue in ev_timer_again. - add/document ev_periodic_at, retract direct access to ->at. - improve ev_stat docs. - add portability requirements section. - fix manpage headers etc. - normalise WSA error codes to lower range on windows. - add consistency check code that can be called automatically or on demand to check for internal structures (ev_loop_verify). 3.31 Wed Apr 16 20:45:04 CEST 2008 - added last minute fix for ev_poll.c by Brandon Black.
3.3 Wed Apr 16 19:04:10 CEST 2008 - event_base_loopexit should return 0 on success (W.C.A. Wijngaards). - added linux eventfd support. - try to autodetect epoll and inotify support by libc header version if not using autoconf. - new symbols: EV_DEFAULT_UC and EV_DEFAULT_UC_. - declare functions defined in ev.h as inline if C99 or gcc are available. - enable inlining with gcc versions 2 and 3. - work around broken poll implementations potentially not clearing revents field in ev_poll (Brandon Black) (no such systems are known at this time). - work around a bug in realloc on openbsd and darwin, also makes the erroneous valgrind complaints go away (noted by various people). - fix ev_async_pending, add c++ wrapper for ev_async (based on patch sent by Johannes Deisenhofer). - add sensible set method to ev::embed. - made integer constants type int in ev.h. 3.2 Wed Apr 2 17:11:19 CEST 2008 - fix a 64 bit overflow issue in the select backend, by using fd_mask instead of int for the mask. - rename internal sighandler to avoid clash with very old perls. - entering ev_loop will not clear the ONESHOT or NONBLOCKING flags of any outer loops anymore. - add ev_async_pending. 3.1 Thu Mar 13 13:45:22 CET 2008 - implement ev_async watchers. - only initialise signal pipe on demand. - make use of sig_atomic_t configurable. - improved documentation. 3.0 Mon Jan 28 13:14:47 CET 2008 - API/ABI bump to version 3.0. - ev++.h includes "ev.h" by default now, not <ev.h>. - slightly improved documentation. - speed up signal detection after a fork. - only optionally return trace status changed in ev_child watchers. - experimental (and undocumented) loop wrappers for ev++.h. 2.01 Tue Dec 25 08:04:41 CET 2007 - separate Changes file. - fix ev_path_set => ev_stat_set typo. - remove event_compat.h from the libev tarball. - change how include files are found. - doc updates. - update licenses, explicitly allow for GPL relicensing. 2.0 Sat Dec 22 17:47:03 CET 2007 - new ev_sleep, ev_set_(io|timeout)_collect_interval. - removed epoll from embeddable fd set. - fix embed watchers. - renamed ev_embed.loop to other. - added exported Symbol tables. - undefine member wrapper macros at the end of ev.c. - respect EV_H in ev++.h. 1.86 Tue Dec 18 02:36:57 CET 2007 - fix memleak on loop destroy (not relevant for perl). 1.85 Fri Dec 14 20:32:40 CET 2007 - fix some aliasing issues w.r.t. timers and periodics (not relevant for perl). (for historic versions refer to EV/Changes, found in the Perl interface) 0.1 Wed Oct 31 21:31:48 CET 2007 - original version; hacked together in <24h. nio4r-2.7.3/ext/libev/ev_win32.c0000644000004100000410000001234214632135320016310 0ustar www-datawww-data/* * libev win32 compatibility cruft (_not_ a backend) * * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifdef _WIN32 /* note: the comment below could not be substantiated, but what would I care */ /* MSDN says this is required to handle SIGFPE */ /* my wild guess would be that using something floating-pointy is required */ /* for the crt to do something about it */ volatile double SIGFPE_REQ = 0.0f; static SOCKET ev_tcp_socket (void) { #if EV_USE_WSASOCKET return WSASocket (AF_INET, SOCK_STREAM, 0, 0, 0, 0); #else return socket (AF_INET, SOCK_STREAM, 0); #endif } /* oh, the humanity! */ static int ev_pipe (int filedes [2]) { struct sockaddr_in addr = { 0 }; int addr_size = sizeof (addr); struct sockaddr_in adr2; int adr2_size = sizeof (adr2); SOCKET listener; SOCKET sock [2] = { -1, -1 }; if ((listener = ev_tcp_socket ()) == INVALID_SOCKET) return -1; addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK); addr.sin_port = 0; if (bind (listener, (struct sockaddr *)&addr, addr_size)) goto fail; if (getsockname (listener, (struct sockaddr *)&addr, &addr_size)) goto fail; if (listen (listener, 1)) goto fail; if ((sock [0] = ev_tcp_socket ()) == INVALID_SOCKET) goto fail; if (connect (sock [0], (struct sockaddr *)&addr, addr_size)) goto fail; /* TODO: returns INVALID_SOCKET on winsock accept, not < 0. fix it */ /* when convenient, probably by just removing error checking altogether? */ if ((sock [1] = accept (listener, 0, 0)) < 0) goto fail; /* windows vista returns fantasy port numbers for sockets: * example for two interconnected tcp sockets: * * (Socket::unpack_sockaddr_in getsockname $sock0)[0] == 53364 * (Socket::unpack_sockaddr_in getpeername $sock0)[0] == 53363 * (Socket::unpack_sockaddr_in getsockname $sock1)[0] == 53363 * (Socket::unpack_sockaddr_in getpeername $sock1)[0] == 53365 * * wow! tridirectional sockets! 
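* (note the asymmetry: sock1's peer port 53365 does not even match sock0's * own port 53364, so only the sock0-peer vs. sock1-local pair is usable for * the cross-check done below.)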
* * this way of checking ports seems to work: */ if (getpeername (sock [0], (struct sockaddr *)&addr, &addr_size)) goto fail; if (getsockname (sock [1], (struct sockaddr *)&adr2, &adr2_size)) goto fail; errno = WSAEINVAL; if (addr_size != adr2_size || addr.sin_addr.s_addr != adr2.sin_addr.s_addr /* just to be sure, I mean, it's windows */ || addr.sin_port != adr2.sin_port) goto fail; closesocket (listener); #if EV_SELECT_IS_WINSOCKET filedes [0] = EV_WIN32_HANDLE_TO_FD (sock [0]); filedes [1] = EV_WIN32_HANDLE_TO_FD (sock [1]); #else /* when select isn't winsocket, we also expect socket, connect, accept etc. * to work on fds */ filedes [0] = sock [0]; filedes [1] = sock [1]; #endif return 0; fail: closesocket (listener); if (sock [0] != INVALID_SOCKET) closesocket (sock [0]); if (sock [1] != INVALID_SOCKET) closesocket (sock [1]); return -1; } #undef pipe #define pipe(filedes) ev_pipe (filedes) #define EV_HAVE_EV_TIME 1 ev_tstamp ev_time (void) { FILETIME ft; ULARGE_INTEGER ui; GetSystemTimeAsFileTime (&ft); ui.u.LowPart = ft.dwLowDateTime; ui.u.HighPart = ft.dwHighDateTime; /* also, msvc cannot convert ulonglong to double... yes, it is that sucky */ return EV_TS_FROM_USEC (((LONGLONG)(ui.QuadPart - 116444736000000000) * 1e-1)); } #endif nio4r-2.7.3/ext/libev/ev_kqueue.c0000644000004100000410000001566014632135320016653 0ustar www-datawww-data/* * libev kqueue backend * * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2016,2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. 
*/ #include <sys/types.h> #include <sys/time.h> #include <sys/event.h> #include <string.h> #include <errno.h> inline_speed void kqueue_change (EV_P_ int fd, int filter, int flags, int fflags) { ++kqueue_changecnt; array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, array_needsize_noinit); EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); } /* OS X at least needs this */ #ifndef EV_ENABLE # define EV_ENABLE 0 #endif #ifndef NOTE_EOF # define NOTE_EOF 0 #endif static void kqueue_modify (EV_P_ int fd, int oev, int nev) { if (oev != nev) { if (oev & EV_READ) kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0); if (oev & EV_WRITE) kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0); } /* to detect close/reopen reliably, we have to re-add */ /* event requests even when oev == nev */ if (nev & EV_READ) kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD | EV_ENABLE, NOTE_EOF); if (nev & EV_WRITE) kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, NOTE_EOF); } static void kqueue_poll (EV_P_ ev_tstamp timeout) { int res, i; struct timespec ts; /* need to resize so there is enough space for errors */ if (kqueue_changecnt > kqueue_eventmax) { ev_free (kqueue_events); kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); } EV_RELEASE_CB; EV_TS_SET (ts, timeout); res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); EV_ACQUIRE_CB; kqueue_changecnt = 0; if (ecb_expect_false (res < 0)) { if (errno != EINTR) ev_syserr ("(libev) kqueue kevent"); return; } for (i = 0; i < res; ++i) { int fd = kqueue_events [i].ident; if (ecb_expect_false (kqueue_events [i].flags & EV_ERROR)) { int err = kqueue_events [i].data; /* we are only interested in errors for fds that we are interested in :) */ if (anfds [fd].events) { if (err == ENOENT) /* resubmit changes on ENOENT */ kqueue_modify (EV_A_ fd, 0, anfds [fd].events); else if (err == EBADF) /* on EBADF, we re-check the fd */ { if (fd_valid (fd)) kqueue_modify (EV_A_ fd, 0, anfds [fd].events); else { assert (("libev: kqueue found invalid fd", 0)); fd_kill (EV_A_ fd); } } else /* on all other errors, we error out on the fd */ { assert (("libev: kqueue found invalid fd", 0)); fd_kill (EV_A_ fd); } } } else fd_event ( EV_A_ fd, kqueue_events [i].filter == EVFILT_READ ? EV_READ : kqueue_events [i].filter == EVFILT_WRITE ?
EV_WRITE : 0 ); } if (ecb_expect_false (res == kqueue_eventmax)) { ev_free (kqueue_events); kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1); kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); } } inline_size int kqueue_init (EV_P_ int flags) { /* initialize the kernel queue */ kqueue_fd_pid = getpid (); if ((backend_fd = kqueue ()) < 0) return 0; fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ backend_mintime = EV_TS_CONST (1e-9); /* apparently, they did the right thing in freebsd */ backend_modify = kqueue_modify; backend_poll = kqueue_poll; kqueue_eventmax = 64; /* initial number of events receivable per poll */ kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); kqueue_changes = 0; kqueue_changemax = 0; kqueue_changecnt = 0; return EVBACKEND_KQUEUE; } inline_size void kqueue_destroy (EV_P) { ev_free (kqueue_events); ev_free (kqueue_changes); } inline_size void kqueue_fork (EV_P) { /* some BSD kernels don't just destroy the kqueue itself, * but also close the fd, which isn't documented, and * impossible to support properly. * we remember the pid of the kqueue call and only close * the fd if the pid is still the same. * this leaks fds on sane kernels, but BSD interfaces are * notoriously buggy and rarely get fixed. */ pid_t newpid = getpid (); if (newpid == kqueue_fd_pid) close (backend_fd); kqueue_fd_pid = newpid; while ((backend_fd = kqueue ()) < 0) ev_syserr ("(libev) kqueue"); fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* re-register interest in fds */ fd_rearm_all (EV_A); } /* sys/event.h defines EV_ERROR */ #undef EV_ERROR nio4r-2.7.3/ext/libev/ev_poll.c0000644000004100000410000001102314632135320016307 0ustar www-datawww-data/* * libev poll fd activity backend * * Copyright (c) 2007,2008,2009,2010,2011,2016,2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. 
If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #include <poll.h> inline_size void array_needsize_pollidx (int *base, int offset, int count) { /* using memset (.., -1, ...) is tempting, but we try * to be ultraportable */ base += offset; while (count--) *base++ = -1; } static void poll_modify (EV_P_ int fd, int oev, int nev) { int idx; if (oev == nev) return; array_needsize (int, pollidxs, pollidxmax, fd + 1, array_needsize_pollidx); idx = pollidxs [fd]; if (idx < 0) /* need to allocate a new pollfd */ { pollidxs [fd] = idx = pollcnt++; array_needsize (struct pollfd, polls, pollmax, pollcnt, array_needsize_noinit); polls [idx].fd = fd; } assert (polls [idx].fd == fd); if (nev) polls [idx].events = (nev & EV_READ ? POLLIN : 0) | (nev & EV_WRITE ? POLLOUT : 0); else /* remove pollfd */ { pollidxs [fd] = -1; if (ecb_expect_true (idx < --pollcnt)) { polls [idx] = polls [pollcnt]; pollidxs [polls [idx].fd] = idx; } } } static void poll_poll (EV_P_ ev_tstamp timeout) { struct pollfd *p; int res; EV_RELEASE_CB; res = poll (polls, pollcnt, EV_TS_TO_MSEC (timeout)); EV_ACQUIRE_CB; if (ecb_expect_false (res < 0)) { if (errno == EBADF) fd_ebadf (EV_A); else if (errno == ENOMEM && !syserr_cb) fd_enomem (EV_A); else if (errno != EINTR) ev_syserr ("(libev) poll"); } else for (p = polls; res; ++p) { assert (("libev: poll returned illegal result, broken BSD kernel?", p < polls + pollcnt)); if (ecb_expect_false (p->revents)) /* this expect is debatable */ { --res; if (ecb_expect_false (p->revents & POLLNVAL)) { assert (("libev: poll found invalid fd in poll set", 0)); fd_kill (EV_A_ p->fd); } else fd_event ( EV_A_ p->fd, (p->revents & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) | (p->revents & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) ); } } } inline_size int poll_init (EV_P_ int flags) { backend_mintime = EV_TS_CONST (1e-3); backend_modify = poll_modify; backend_poll = poll_poll; pollidxs = 0; pollidxmax = 0; polls = 0; pollmax = 0; pollcnt = 0; return EVBACKEND_POLL; } inline_size void poll_destroy (EV_P) { ev_free (pollidxs); ev_free (polls); } nio4r-2.7.3/ext/libev/ev_vars.h0000644000004100000410000001657514632135320016336 0ustar www-datawww-data/* * loop member variable declarations * * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #define VARx(type,name) VAR(name, type name) VARx(ev_tstamp, now_floor) /* last time we refreshed rt_time */ VARx(ev_tstamp, mn_now) /* monotonic clock "now" */ VARx(ev_tstamp, rtmn_diff) /* difference realtime - monotonic time */ /* for reverse feeding of events */ VARx(W *, rfeeds) VARx(int, rfeedmax) VARx(int, rfeedcnt) VAR (pendings, ANPENDING *pendings [NUMPRI]) VAR (pendingmax, int pendingmax [NUMPRI]) VAR (pendingcnt, int pendingcnt [NUMPRI]) VARx(int, pendingpri) /* highest priority currently pending */ VARx(ev_prepare, pending_w) /* dummy pending watcher */ VARx(ev_tstamp, io_blocktime) VARx(ev_tstamp, timeout_blocktime) VARx(int, backend) VARx(int, activecnt) /* total number of active events ("refcount") */ VARx(EV_ATOMIC_T, loop_done) /* signal by ev_break */ VARx(int, backend_fd) VARx(ev_tstamp, backend_mintime) /* assumed typical timer resolution */ VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev)) VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout)) VARx(ANFD *, anfds) VARx(int, anfdmax) VAR (evpipe, int evpipe [2]) VARx(ev_io, pipe_w) VARx(EV_ATOMIC_T, pipe_write_wanted) VARx(EV_ATOMIC_T, pipe_write_skipped) #if !defined(_WIN32) || EV_GENWRAP VARx(pid_t, curpid) #endif VARx(char, postfork) /* true if we need to recreate kernel state after fork */ #if EV_USE_SELECT || EV_GENWRAP VARx(void *, vec_ri) VARx(void *, vec_ro) VARx(void *, vec_wi) VARx(void *, vec_wo) #if defined(_WIN32) || EV_GENWRAP VARx(void *, vec_eo) #endif VARx(int, vec_max) #endif #if EV_USE_POLL || EV_GENWRAP VARx(struct pollfd *, polls) VARx(int, pollmax) VARx(int, pollcnt) VARx(int *, pollidxs) /* maps fds into structure indices */ VARx(int, pollidxmax) #endif #if EV_USE_EPOLL || EV_GENWRAP VARx(struct epoll_event *, epoll_events) VARx(int, epoll_eventmax) VARx(int *, epoll_eperms) VARx(int, epoll_epermcnt) VARx(int, epoll_epermmax) #endif #if EV_USE_LINUXAIO || EV_GENWRAP VARx(aio_context_t, linuxaio_ctx) VARx(int, linuxaio_iteration) VARx(struct aniocb **, linuxaio_iocbps) VARx(int, linuxaio_iocbpmax) VARx(struct iocb **, linuxaio_submits) VARx(int, linuxaio_submitcnt) VARx(int, linuxaio_submitmax) VARx(ev_io, linuxaio_epoll_w) #endif #if EV_USE_IOURING || EV_GENWRAP VARx(int, iouring_fd) VARx(unsigned, iouring_to_submit); VARx(int, iouring_entries) VARx(int, iouring_max_entries) VARx(void *, iouring_sq_ring) 
VARx(void *, iouring_cq_ring) VARx(void *, iouring_sqes) VARx(uint32_t, iouring_sq_ring_size) VARx(uint32_t, iouring_cq_ring_size) VARx(uint32_t, iouring_sqes_size) VARx(uint32_t, iouring_sq_head) VARx(uint32_t, iouring_sq_tail) VARx(uint32_t, iouring_sq_ring_mask) VARx(uint32_t, iouring_sq_ring_entries) VARx(uint32_t, iouring_sq_flags) VARx(uint32_t, iouring_sq_dropped) VARx(uint32_t, iouring_sq_array) VARx(uint32_t, iouring_cq_head) VARx(uint32_t, iouring_cq_tail) VARx(uint32_t, iouring_cq_ring_mask) VARx(uint32_t, iouring_cq_ring_entries) VARx(uint32_t, iouring_cq_overflow) VARx(uint32_t, iouring_cq_cqes) VARx(ev_tstamp, iouring_tfd_to) VARx(int, iouring_tfd) VARx(ev_io, iouring_tfd_w) #endif #if EV_USE_KQUEUE || EV_GENWRAP VARx(pid_t, kqueue_fd_pid) VARx(struct kevent *, kqueue_changes) VARx(int, kqueue_changemax) VARx(int, kqueue_changecnt) VARx(struct kevent *, kqueue_events) VARx(int, kqueue_eventmax) #endif #if EV_USE_PORT || EV_GENWRAP VARx(struct port_event *, port_events) VARx(int, port_eventmax) #endif #if EV_USE_IOCP || EV_GENWRAP VARx(HANDLE, iocp) #endif VARx(int *, fdchanges) VARx(int, fdchangemax) VARx(int, fdchangecnt) VARx(ANHE *, timers) VARx(int, timermax) VARx(int, timercnt) #if EV_PERIODIC_ENABLE || EV_GENWRAP VARx(ANHE *, periodics) VARx(int, periodicmax) VARx(int, periodiccnt) #endif #if EV_IDLE_ENABLE || EV_GENWRAP VAR (idles, ev_idle **idles [NUMPRI]) VAR (idlemax, int idlemax [NUMPRI]) VAR (idlecnt, int idlecnt [NUMPRI]) #endif VARx(int, idleall) /* total number */ VARx(struct ev_prepare **, prepares) VARx(int, preparemax) VARx(int, preparecnt) VARx(struct ev_check **, checks) VARx(int, checkmax) VARx(int, checkcnt) #if EV_FORK_ENABLE || EV_GENWRAP VARx(struct ev_fork **, forks) VARx(int, forkmax) VARx(int, forkcnt) #endif #if EV_CLEANUP_ENABLE || EV_GENWRAP VARx(struct ev_cleanup **, cleanups) VARx(int, cleanupmax) VARx(int, cleanupcnt) #endif #if EV_ASYNC_ENABLE || EV_GENWRAP VARx(EV_ATOMIC_T, async_pending) VARx(struct ev_async **, asyncs) VARx(int, asyncmax) VARx(int, asynccnt) #endif #if EV_USE_INOTIFY || EV_GENWRAP VARx(int, fs_fd) VARx(ev_io, fs_w) VARx(char, fs_2625) /* whether we are running in linux 2.6.25 or newer */ VAR (fs_hash, ANFS fs_hash [EV_INOTIFY_HASHSIZE]) #endif VARx(EV_ATOMIC_T, sig_pending) #if EV_USE_SIGNALFD || EV_GENWRAP VARx(int, sigfd) VARx(ev_io, sigfd_w) VARx(sigset_t, sigfd_set) #endif #if EV_USE_TIMERFD || EV_GENWRAP VARx(int, timerfd) /* timerfd for time jump detection */ VARx(ev_io, timerfd_w) #endif VARx(unsigned int, origflags) /* original loop flags */ #if EV_FEATURE_API || EV_GENWRAP VARx(unsigned int, loop_count) /* total number of loop iterations/blocks */ VARx(unsigned int, loop_depth) /* #ev_run enters - #ev_run leaves */ VARx(void *, userdata) /* C++ doesn't support the ev_loop_callback typedef here. stinks. */ VAR (release_cb, void (*release_cb)(EV_P) EV_NOEXCEPT) VAR (acquire_cb, void (*acquire_cb)(EV_P) EV_NOEXCEPT) VAR (invoke_cb , ev_loop_callback invoke_cb) #endif #undef VARx nio4r-2.7.3/ext/libev/ev.h0000644000004100000410000007306214632135320015301 0ustar www-datawww-data/* * libev native API header * * Copyright (c) 2007-2020 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifndef EV_H_ #define EV_H_ #ifdef __cplusplus # define EV_CPP(x) x # if __cplusplus >= 201103L # define EV_NOEXCEPT noexcept # else # define EV_NOEXCEPT # endif #else # define EV_CPP(x) # define EV_NOEXCEPT #endif #define EV_THROW EV_NOEXCEPT /* pre-4.25, do not use in new code */ EV_CPP(extern "C" {) /*****************************************************************************/ /* pre-4.0 compatibility */ #ifndef EV_COMPAT3 # define EV_COMPAT3 1 #endif #ifndef EV_FEATURES # if defined __OPTIMIZE_SIZE__ # define EV_FEATURES 0x7c # else # define EV_FEATURES 0x7f # endif #endif #define EV_FEATURE_CODE ((EV_FEATURES) & 1) #define EV_FEATURE_DATA ((EV_FEATURES) & 2) #define EV_FEATURE_CONFIG ((EV_FEATURES) & 4) #define EV_FEATURE_API ((EV_FEATURES) & 8) #define EV_FEATURE_WATCHERS ((EV_FEATURES) & 16) #define EV_FEATURE_BACKENDS ((EV_FEATURES) & 32) #define EV_FEATURE_OS ((EV_FEATURES) & 64) /* these priorities are inclusive, higher priorities will be invoked earlier */ #ifndef EV_MINPRI # define EV_MINPRI (EV_FEATURE_CONFIG ? -2 : 0) #endif #ifndef EV_MAXPRI # define EV_MAXPRI (EV_FEATURE_CONFIG ? 
+2 : 0) #endif #ifndef EV_MULTIPLICITY # define EV_MULTIPLICITY EV_FEATURE_CONFIG #endif #ifndef EV_PERIODIC_ENABLE # define EV_PERIODIC_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_STAT_ENABLE # define EV_STAT_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_PREPARE_ENABLE # define EV_PREPARE_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_CHECK_ENABLE # define EV_CHECK_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_IDLE_ENABLE # define EV_IDLE_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_FORK_ENABLE # define EV_FORK_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_CLEANUP_ENABLE # define EV_CLEANUP_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_SIGNAL_ENABLE # define EV_SIGNAL_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_CHILD_ENABLE # ifdef _WIN32 # define EV_CHILD_ENABLE 0 # else # define EV_CHILD_ENABLE EV_FEATURE_WATCHERS #endif #endif #ifndef EV_ASYNC_ENABLE # define EV_ASYNC_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_EMBED_ENABLE # define EV_EMBED_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_WALK_ENABLE # define EV_WALK_ENABLE 0 /* not yet */ #endif /*****************************************************************************/ #if EV_CHILD_ENABLE && !EV_SIGNAL_ENABLE # undef EV_SIGNAL_ENABLE # define EV_SIGNAL_ENABLE 1 #endif /*****************************************************************************/ #ifndef EV_TSTAMP_T # define EV_TSTAMP_T double #endif typedef EV_TSTAMP_T ev_tstamp; #include <string.h> /* for memmove */ #ifndef EV_ATOMIC_T # include <signal.h> # define EV_ATOMIC_T sig_atomic_t volatile #endif #if EV_STAT_ENABLE # ifdef _WIN32 # include <time.h> # include <sys/types.h> # endif # include <sys/stat.h> #endif /* support multiple event loops? */ #if EV_MULTIPLICITY struct ev_loop; # define EV_P struct ev_loop *loop /* a loop as sole parameter in a declaration */ # define EV_P_ EV_P, /* a loop as first of multiple parameters */ # define EV_A loop /* a loop as sole argument to a function call */ # define EV_A_ EV_A, /* a loop as first of multiple arguments */ # define EV_DEFAULT_UC ev_default_loop_uc_ () /* the default loop, if initialised, as sole arg */ # define EV_DEFAULT_UC_ EV_DEFAULT_UC, /* the default loop as first of multiple arguments */ # define EV_DEFAULT ev_default_loop (0) /* the default loop as sole arg */ # define EV_DEFAULT_ EV_DEFAULT, /* the default loop as first of multiple arguments */ #else # define EV_P void # define EV_P_ # define EV_A # define EV_A_ # define EV_DEFAULT # define EV_DEFAULT_ # define EV_DEFAULT_UC # define EV_DEFAULT_UC_ # undef EV_EMBED_ENABLE #endif /* EV_INLINE is used for functions in header files */ #if __STDC_VERSION__ >= 199901L || __GNUC__ >= 3 # define EV_INLINE static inline #else # define EV_INLINE static #endif #ifdef EV_API_STATIC # define EV_API_DECL static #else # define EV_API_DECL extern #endif /* EV_PROTOTYPES can be used to switch off prototype declarations */ #ifndef EV_PROTOTYPES # define EV_PROTOTYPES 1 #endif /*****************************************************************************/ #define EV_VERSION_MAJOR 4 #define EV_VERSION_MINOR 33 /* eventmask, revents, events...
*/ enum { EV_UNDEF = (int)0xFFFFFFFF, /* guaranteed to be invalid */ EV_NONE = 0x00, /* no events */ EV_READ = 0x01, /* ev_io detected read will not block */ EV_WRITE = 0x02, /* ev_io detected write will not block */ EV__IOFDSET = 0x80, /* internal use only */ EV_IO = EV_READ, /* alias for type-detection */ EV_TIMER = 0x00000100, /* timer timed out */ #if EV_COMPAT3 EV_TIMEOUT = EV_TIMER, /* pre 4.0 API compatibility */ #endif EV_PERIODIC = 0x00000200, /* periodic timer timed out */ EV_SIGNAL = 0x00000400, /* signal was received */ EV_CHILD = 0x00000800, /* child/pid had status change */ EV_STAT = 0x00001000, /* stat data changed */ EV_IDLE = 0x00002000, /* event loop is idling */ EV_PREPARE = 0x00004000, /* event loop about to poll */ EV_CHECK = 0x00008000, /* event loop finished poll */ EV_EMBED = 0x00010000, /* embedded event loop needs sweep */ EV_FORK = 0x00020000, /* event loop resumed in child */ EV_CLEANUP = 0x00040000, /* event loop resumed in child */ EV_ASYNC = 0x00080000, /* async intra-loop signal */ EV_CUSTOM = 0x01000000, /* for use by user code */ EV_ERROR = (int)0x80000000 /* sent when an error occurs */ }; /* can be used to add custom fields to all watchers, while losing binary compatibility */ #ifndef EV_COMMON # define EV_COMMON void *data; #endif #ifndef EV_CB_DECLARE # define EV_CB_DECLARE(type) void (*cb)(EV_P_ struct type *w, int revents); #endif #ifndef EV_CB_INVOKE # define EV_CB_INVOKE(watcher,revents) (watcher)->cb (EV_A_ (watcher), (revents)) #endif /* not official, do not use */ #define EV_CB(type,name) void name (EV_P_ struct ev_ ## type *w, int revents) /* * struct member types: * private: you may look at them, but not change them, * and they might not mean anything to you. * ro: can be read anytime, but only changed when the watcher isn't active. * rw: can be read and modified anytime, even when the watcher is active. * * some internal details that might be helpful for debugging: * * active is either 0, which means the watcher is not active, * or the array index of the watcher (periodics, timers) * or the array index + 1 (most other watchers) * or simply 1 for watchers that aren't in some array. * pending is either 0, in which case the watcher isn't, * or the array index + 1 in the pendings array. 
*/ #if EV_MINPRI == EV_MAXPRI # define EV_DECL_PRIORITY #elif !defined (EV_DECL_PRIORITY) # define EV_DECL_PRIORITY int priority; #endif /* shared by all watchers */ #define EV_WATCHER(type) \ int active; /* private */ \ int pending; /* private */ \ EV_DECL_PRIORITY /* private */ \ EV_COMMON /* rw */ \ EV_CB_DECLARE (type) /* private */ #define EV_WATCHER_LIST(type) \ EV_WATCHER (type) \ struct ev_watcher_list *next; /* private */ #define EV_WATCHER_TIME(type) \ EV_WATCHER (type) \ ev_tstamp at; /* private */ /* base class, nothing to see here unless you subclass */ typedef struct ev_watcher { EV_WATCHER (ev_watcher) } ev_watcher; /* base class, nothing to see here unless you subclass */ typedef struct ev_watcher_list { EV_WATCHER_LIST (ev_watcher_list) } ev_watcher_list; /* base class, nothing to see here unless you subclass */ typedef struct ev_watcher_time { EV_WATCHER_TIME (ev_watcher_time) } ev_watcher_time; /* invoked when fd is either EV_READable or EV_WRITEable */ /* revent EV_READ, EV_WRITE */ typedef struct ev_io { EV_WATCHER_LIST (ev_io) int fd; /* ro */ int events; /* ro */ } ev_io; /* invoked after a specific time, repeatable (based on monotonic clock) */ /* revent EV_TIMEOUT */ typedef struct ev_timer { EV_WATCHER_TIME (ev_timer) ev_tstamp repeat; /* rw */ } ev_timer; /* invoked at some specific time, possibly repeating at regular intervals (based on UTC) */ /* revent EV_PERIODIC */ typedef struct ev_periodic { EV_WATCHER_TIME (ev_periodic) ev_tstamp offset; /* rw */ ev_tstamp interval; /* rw */ ev_tstamp (*reschedule_cb)(struct ev_periodic *w, ev_tstamp now) EV_NOEXCEPT; /* rw */ } ev_periodic; /* invoked when the given signal has been received */ /* revent EV_SIGNAL */ typedef struct ev_signal { EV_WATCHER_LIST (ev_signal) int signum; /* ro */ } ev_signal; /* invoked when sigchld is received and waitpid indicates the given pid */ /* revent EV_CHILD */ /* does not support priorities */ typedef struct ev_child { EV_WATCHER_LIST (ev_child) int flags; /* private */ int pid; /* ro */ int rpid; /* rw, holds the received pid */ int rstatus; /* rw, holds the exit status, use the macros from sys/wait.h */ } ev_child; #if EV_STAT_ENABLE /* st_nlink = 0 means missing file or other error */ # ifdef _WIN32 typedef struct _stati64 ev_statdata; # else typedef struct stat ev_statdata; # endif /* invoked each time the stat data changes for a given path */ /* revent EV_STAT */ typedef struct ev_stat { EV_WATCHER_LIST (ev_stat) ev_timer timer; /* private */ ev_tstamp interval; /* ro */ const char *path; /* ro */ ev_statdata prev; /* ro */ ev_statdata attr; /* ro */ int wd; /* wd for inotify, fd for kqueue */ } ev_stat; #endif /* invoked when nothing else needs to be done, keeps the process from blocking */ /* revent EV_IDLE */ typedef struct ev_idle { EV_WATCHER (ev_idle) } ev_idle; /* invoked for each run of the mainloop, just before the blocking call */ /* you can still change events in any way you like */ /* revent EV_PREPARE */ typedef struct ev_prepare { EV_WATCHER (ev_prepare) } ev_prepare; /* invoked for each run of the mainloop, just after the blocking call */ /* revent EV_CHECK */ typedef struct ev_check { EV_WATCHER (ev_check) } ev_check; /* the callback gets invoked before check in the child process when a fork was detected */ /* revent EV_FORK */ typedef struct ev_fork { EV_WATCHER (ev_fork) } ev_fork; /* is invoked just before the loop gets destroyed */ /* revent EV_CLEANUP */ typedef struct ev_cleanup { EV_WATCHER (ev_cleanup) } ev_cleanup; #if EV_EMBED_ENABLE /* used to
embed an event loop inside another */ /* the callback gets invoked when the event loop has handled events, and can be 0 */ typedef struct ev_embed { EV_WATCHER (ev_embed) struct ev_loop *other; /* ro */ #undef EV_IO_ENABLE #define EV_IO_ENABLE 1 ev_io io; /* private */ #undef EV_PREPARE_ENABLE #define EV_PREPARE_ENABLE 1 ev_prepare prepare; /* private */ ev_check check; /* unused */ ev_timer timer; /* unused */ ev_periodic periodic; /* unused */ ev_idle idle; /* unused */ ev_fork fork; /* private */ ev_cleanup cleanup; /* unused */ } ev_embed; #endif #if EV_ASYNC_ENABLE /* invoked when somebody calls ev_async_send on the watcher */ /* revent EV_ASYNC */ typedef struct ev_async { EV_WATCHER (ev_async) EV_ATOMIC_T sent; /* private */ } ev_async; # define ev_async_pending(w) (+(w)->sent) #endif /* the presence of this union forces similar struct layout */ union ev_any_watcher { struct ev_watcher w; struct ev_watcher_list wl; struct ev_io io; struct ev_timer timer; struct ev_periodic periodic; struct ev_signal signal; struct ev_child child; #if EV_STAT_ENABLE struct ev_stat stat; #endif #if EV_IDLE_ENABLE struct ev_idle idle; #endif struct ev_prepare prepare; struct ev_check check; #if EV_FORK_ENABLE struct ev_fork fork; #endif #if EV_CLEANUP_ENABLE struct ev_cleanup cleanup; #endif #if EV_EMBED_ENABLE struct ev_embed embed; #endif #if EV_ASYNC_ENABLE struct ev_async async; #endif }; /* flag bits for ev_default_loop and ev_loop_new */ enum { /* the default */ EVFLAG_AUTO = 0x00000000U, /* not quite a mask */ /* flag bits */ EVFLAG_NOENV = 0x01000000U, /* do NOT consult environment */ EVFLAG_FORKCHECK = 0x02000000U, /* check for a fork in each iteration */ /* debugging/feature disable */ EVFLAG_NOINOTIFY = 0x00100000U, /* do not attempt to use inotify */ #if EV_COMPAT3 EVFLAG_NOSIGFD = 0, /* compatibility to pre-3.9 */ #endif EVFLAG_SIGNALFD = 0x00200000U, /* attempt to use signalfd */ EVFLAG_NOSIGMASK = 0x00400000U, /* avoid modifying the signal mask */ EVFLAG_NOTIMERFD = 0x00800000U /* avoid creating a timerfd */ }; /* method bits to be ored together */ enum { EVBACKEND_SELECT = 0x00000001U, /* available just about anywhere */ EVBACKEND_POLL = 0x00000002U, /* !win, !aix, broken on osx */ EVBACKEND_EPOLL = 0x00000004U, /* linux */ EVBACKEND_KQUEUE = 0x00000008U, /* bsd, broken on osx */ EVBACKEND_DEVPOLL = 0x00000010U, /* solaris 8 */ /* NYI */ EVBACKEND_PORT = 0x00000020U, /* solaris 10 */ EVBACKEND_LINUXAIO = 0x00000040U, /* linux AIO, 4.19+ */ EVBACKEND_IOURING = 0x00000080U, /* linux io_uring, 5.1+ */ EVBACKEND_ALL = 0x000000FFU, /* all known backends */ EVBACKEND_MASK = 0x0000FFFFU /* all future backends */ }; #if EV_PROTOTYPES EV_API_DECL int ev_version_major (void) EV_NOEXCEPT; EV_API_DECL int ev_version_minor (void) EV_NOEXCEPT; EV_API_DECL unsigned int ev_supported_backends (void) EV_NOEXCEPT; EV_API_DECL unsigned int ev_recommended_backends (void) EV_NOEXCEPT; EV_API_DECL unsigned int ev_embeddable_backends (void) EV_NOEXCEPT; EV_API_DECL ev_tstamp ev_time (void) EV_NOEXCEPT; EV_API_DECL void ev_sleep (ev_tstamp delay) EV_NOEXCEPT; /* sleep for a while */ /* Sets the allocation function to use, works like realloc. * It is used to allocate and free memory. * If it returns zero when memory needs to be allocated, the library might abort * or take some potentially destructive action. * The default is your system realloc function. 
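 *
 * A hedged sketch of a replacement allocator (my_realloc is an illustrative
 * name, not part of the libev API) - it frees on size 0, as libev expects,
 * and retries failed allocations instead of letting libev take destructive
 * action:
 *
 *   static void *
 *   my_realloc (void *ptr, size_t size)
 *   {
 *     if (!size) { free (ptr); return 0; }
 *     for (;;) { void *p = realloc (ptr, size); if (p) return p; sleep (1); }
 *   }
 *
 *   ev_set_allocator (my_realloc);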
*/ EV_API_DECL void ev_set_allocator (void *(*cb)(void *ptr, size_t size) EV_NOEXCEPT) EV_NOEXCEPT; /* set the callback function to call on a * retryable syscall error * (such as failed select, poll, epoll_wait) */ EV_API_DECL void ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT; #if EV_MULTIPLICITY /* the default loop is the only one that handles signals and child watchers */ /* you can call this as often as you like */ EV_API_DECL struct ev_loop *ev_default_loop (unsigned int flags EV_CPP (= 0)) EV_NOEXCEPT; #ifdef EV_API_STATIC EV_API_DECL struct ev_loop *ev_default_loop_ptr; #endif EV_INLINE struct ev_loop * ev_default_loop_uc_ (void) EV_NOEXCEPT { extern struct ev_loop *ev_default_loop_ptr; return ev_default_loop_ptr; } EV_INLINE int ev_is_default_loop (EV_P) EV_NOEXCEPT { return EV_A == EV_DEFAULT_UC; } /* create and destroy alternative loops that don't handle signals */ EV_API_DECL struct ev_loop *ev_loop_new (unsigned int flags EV_CPP (= 0)) EV_NOEXCEPT; EV_API_DECL ev_tstamp ev_now (EV_P) EV_NOEXCEPT; /* time w.r.t. timers and the eventloop, updated after each poll */ #else EV_API_DECL int ev_default_loop (unsigned int flags EV_CPP (= 0)) EV_NOEXCEPT; /* returns true when successful */ EV_API_DECL ev_tstamp ev_rt_now; EV_INLINE ev_tstamp ev_now (void) EV_NOEXCEPT { return ev_rt_now; } /* looks weird, but ev_is_default_loop (EV_A) still works if this exists */ EV_INLINE int ev_is_default_loop (void) EV_NOEXCEPT { return 1; } #endif /* multiplicity */ /* destroy event loops, also works for the default loop */ EV_API_DECL void ev_loop_destroy (EV_P); /* this needs to be called after fork, to duplicate the loop */ /* when you want to re-use it in the child */ /* you can call it in either the parent or the child */ /* you can actually call it at any time, anywhere :) */ EV_API_DECL void ev_loop_fork (EV_P) EV_NOEXCEPT; EV_API_DECL unsigned int ev_backend (EV_P) EV_NOEXCEPT; /* backend in use by loop */ EV_API_DECL void ev_now_update (EV_P) EV_NOEXCEPT; /* update event loop time */ #if EV_WALK_ENABLE /* walk (almost) all watchers in the loop of a given type, invoking the */ /* callback on every such watcher. The callback might stop the watcher, */ /* but do nothing else with the loop */ EV_API_DECL void ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT; #endif #endif /* prototypes */ /* ev_run flags values */ enum { EVRUN_NOWAIT = 1, /* do not block/wait */ EVRUN_ONCE = 2 /* block *once* only */ }; /* ev_break how values */ enum { EVBREAK_CANCEL = 0, /* undo unloop */ EVBREAK_ONE = 1, /* unloop once */ EVBREAK_ALL = 2 /* unloop all loops */ }; #if EV_PROTOTYPES EV_API_DECL int ev_run (EV_P_ int flags EV_CPP (= 0)); EV_API_DECL void ev_break (EV_P_ int how EV_CPP (= EVBREAK_ONE)) EV_NOEXCEPT; /* break out of the loop */ /* * ref/unref can be used to add or remove a refcount on the mainloop. every watcher * keeps one reference. if you have a long-running watcher you never unregister that * should not keep ev_loop from running, unref() after starting, and ref() before stopping. 
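 *
 * A minimal sketch, assuming a SIGINT watcher (sig_w and sigint_cb are
 * illustrative names) that should not by itself keep the loop alive:
 *
 *   ev_signal_init (&sig_w, sigint_cb, SIGINT);
 *   ev_signal_start (loop, &sig_w);
 *   ev_unref (loop);   - drop the refcount the watcher added
 *   ...
 *   ev_ref (loop);     - restore it before stopping the watcher
 *   ev_signal_stop (loop, &sig_w);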
*/ EV_API_DECL void ev_ref (EV_P) EV_NOEXCEPT; EV_API_DECL void ev_unref (EV_P) EV_NOEXCEPT; /* * convenience function, wait for a single event, without registering an event watcher * if timeout is < 0, do wait indefinitely */ EV_API_DECL void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_NOEXCEPT; EV_API_DECL void ev_invoke_pending (EV_P); /* invoke all pending watchers */ # if EV_FEATURE_API EV_API_DECL unsigned int ev_iteration (EV_P) EV_NOEXCEPT; /* number of loop iterations */ EV_API_DECL unsigned int ev_depth (EV_P) EV_NOEXCEPT; /* #ev_loop enters - #ev_loop leaves */ EV_API_DECL void ev_verify (EV_P) EV_NOEXCEPT; /* abort if loop data corrupted */ EV_API_DECL void ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT; /* sleep at least this time, default 0 */ EV_API_DECL void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT; /* sleep at least this time, default 0 */ /* advanced stuff for threading etc. support, see docs */ EV_API_DECL void ev_set_userdata (EV_P_ void *data) EV_NOEXCEPT; EV_API_DECL void *ev_userdata (EV_P) EV_NOEXCEPT; typedef void (*ev_loop_callback)(EV_P); EV_API_DECL void ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_NOEXCEPT; /* C++ doesn't allow the use of the ev_loop_callback typedef here, so we need to spell it out */ EV_API_DECL void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)(EV_P) EV_NOEXCEPT) EV_NOEXCEPT; EV_API_DECL unsigned int ev_pending_count (EV_P) EV_NOEXCEPT; /* number of pending events, if any */ /* * stop/start the timer handling. */ EV_API_DECL void ev_suspend (EV_P) EV_NOEXCEPT; EV_API_DECL void ev_resume (EV_P) EV_NOEXCEPT; #endif #endif /* these may evaluate ev multiple times, and the other arguments at most once */ /* either use ev_init + ev_TYPE_set, or the ev_TYPE_init macro, below, to first initialise a watcher */ #define ev_init(ev,cb_) do { \ ((ev_watcher *)(void *)(ev))->active = \ ((ev_watcher *)(void *)(ev))->pending = 0; \ ev_set_priority ((ev), 0); \ ev_set_cb ((ev), cb_); \ } while (0) #define ev_io_modify(ev,events_) do { (ev)->events = (ev)->events & EV__IOFDSET | (events_); } while (0) #define ev_io_set(ev,fd_,events_) do { (ev)->fd = (fd_); (ev)->events = (events_) | EV__IOFDSET; } while (0) #define ev_timer_set(ev,after_,repeat_) do { ((ev_watcher_time *)(ev))->at = (after_); (ev)->repeat = (repeat_); } while (0) #define ev_periodic_set(ev,ofs_,ival_,rcb_) do { (ev)->offset = (ofs_); (ev)->interval = (ival_); (ev)->reschedule_cb = (rcb_); } while (0) #define ev_signal_set(ev,signum_) do { (ev)->signum = (signum_); } while (0) #define ev_child_set(ev,pid_,trace_) do { (ev)->pid = (pid_); (ev)->flags = !!(trace_); } while (0) #define ev_stat_set(ev,path_,interval_) do { (ev)->path = (path_); (ev)->interval = (interval_); (ev)->wd = -2; } while (0) #define ev_idle_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_prepare_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_check_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_embed_set(ev,other_) do { (ev)->other = (other_); } while (0) #define ev_fork_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_cleanup_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_async_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_io_init(ev,cb,fd,events) do { ev_init ((ev), (cb)); ev_io_set ((ev),(fd),(events)); } while (0) #define ev_timer_init(ev,cb,after,repeat) do { ev_init 
((ev), (cb)); ev_timer_set ((ev),(after),(repeat)); } while (0) #define ev_periodic_init(ev,cb,ofs,ival,rcb) do { ev_init ((ev), (cb)); ev_periodic_set ((ev),(ofs),(ival),(rcb)); } while (0) #define ev_signal_init(ev,cb,signum) do { ev_init ((ev), (cb)); ev_signal_set ((ev), (signum)); } while (0) #define ev_child_init(ev,cb,pid,trace) do { ev_init ((ev), (cb)); ev_child_set ((ev),(pid),(trace)); } while (0) #define ev_stat_init(ev,cb,path,interval) do { ev_init ((ev), (cb)); ev_stat_set ((ev),(path),(interval)); } while (0) #define ev_idle_init(ev,cb) do { ev_init ((ev), (cb)); ev_idle_set ((ev)); } while (0) #define ev_prepare_init(ev,cb) do { ev_init ((ev), (cb)); ev_prepare_set ((ev)); } while (0) #define ev_check_init(ev,cb) do { ev_init ((ev), (cb)); ev_check_set ((ev)); } while (0) #define ev_embed_init(ev,cb,other) do { ev_init ((ev), (cb)); ev_embed_set ((ev),(other)); } while (0) #define ev_fork_init(ev,cb) do { ev_init ((ev), (cb)); ev_fork_set ((ev)); } while (0) #define ev_cleanup_init(ev,cb) do { ev_init ((ev), (cb)); ev_cleanup_set ((ev)); } while (0) #define ev_async_init(ev,cb) do { ev_init ((ev), (cb)); ev_async_set ((ev)); } while (0) #define ev_is_pending(ev) (0 + ((ev_watcher *)(void *)(ev))->pending) /* ro, true when watcher is waiting for callback invocation */ #define ev_is_active(ev) (0 + ((ev_watcher *)(void *)(ev))->active) /* ro, true when the watcher has been started */ #define ev_cb_(ev) (ev)->cb /* rw */ #define ev_cb(ev) (memmove (&ev_cb_ (ev), &((ev_watcher *)(ev))->cb, sizeof (ev_cb_ (ev))), (ev)->cb) #if EV_MINPRI == EV_MAXPRI # define ev_priority(ev) ((ev), EV_MINPRI) # define ev_set_priority(ev,pri) ((ev), (pri)) #else # define ev_priority(ev) (+(((ev_watcher *)(void *)(ev))->priority)) # define ev_set_priority(ev,pri) ( (ev_watcher *)(void *)(ev))->priority = (pri) #endif #define ev_periodic_at(ev) (+((ev_watcher_time *)(ev))->at) #ifndef ev_set_cb /* memmove is used here to avoid strict aliasing violations, and hopefully is optimized out by any reasonable compiler */ # define ev_set_cb(ev,cb_) (ev_cb_ (ev) = (cb_), memmove (&((ev_watcher *)(ev))->cb, &ev_cb_ (ev), sizeof (ev_cb_ (ev)))) #endif /* stopping (enabling, adding) a watcher does nothing if it is already running */ /* stopping (disabling, deleting) a watcher does nothing unless it's already running */ #if EV_PROTOTYPES /* feeds an event into a watcher as if the event actually occurred */ /* accepts any ev_watcher type */ EV_API_DECL void ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT; EV_API_DECL void ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT; #if EV_SIGNAL_ENABLE EV_API_DECL void ev_feed_signal (int signum) EV_NOEXCEPT; EV_API_DECL void ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT; #endif EV_API_DECL void ev_invoke (EV_P_ void *w, int revents); EV_API_DECL int ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT; EV_API_DECL void ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT; EV_API_DECL void ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT; EV_API_DECL void ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT; EV_API_DECL void ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT; /* stops if active and no repeat, restarts if active and repeating, starts if inactive and repeating */ EV_API_DECL void ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT; /* return remaining time */ EV_API_DECL ev_tstamp ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT; #if EV_PERIODIC_ENABLE EV_API_DECL void ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT; EV_API_DECL void ev_periodic_stop (EV_P_ ev_periodic *w) 
EV_NOEXCEPT; EV_API_DECL void ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT; #endif /* only supported in the default loop */ #if EV_SIGNAL_ENABLE EV_API_DECL void ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT; EV_API_DECL void ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT; #endif /* only supported in the default loop */ # if EV_CHILD_ENABLE EV_API_DECL void ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT; EV_API_DECL void ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT; # endif # if EV_STAT_ENABLE EV_API_DECL void ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT; EV_API_DECL void ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT; EV_API_DECL void ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT; # endif # if EV_IDLE_ENABLE EV_API_DECL void ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT; EV_API_DECL void ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT; # endif #if EV_PREPARE_ENABLE EV_API_DECL void ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT; EV_API_DECL void ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT; #endif #if EV_CHECK_ENABLE EV_API_DECL void ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT; EV_API_DECL void ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT; #endif # if EV_FORK_ENABLE EV_API_DECL void ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT; EV_API_DECL void ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT; # endif # if EV_CLEANUP_ENABLE EV_API_DECL void ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT; EV_API_DECL void ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT; # endif # if EV_EMBED_ENABLE /* only supported when loop to be embedded is in fact embeddable */ EV_API_DECL void ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT; EV_API_DECL void ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT; EV_API_DECL void ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT; # endif # if EV_ASYNC_ENABLE EV_API_DECL void ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT; EV_API_DECL void ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT; EV_API_DECL void ev_async_send (EV_P_ ev_async *w) EV_NOEXCEPT; # endif #if EV_COMPAT3 #define EVLOOP_NONBLOCK EVRUN_NOWAIT #define EVLOOP_ONESHOT EVRUN_ONCE #define EVUNLOOP_CANCEL EVBREAK_CANCEL #define EVUNLOOP_ONE EVBREAK_ONE #define EVUNLOOP_ALL EVBREAK_ALL #if EV_PROTOTYPES EV_INLINE void ev_loop (EV_P_ int flags) { ev_run (EV_A_ flags); } EV_INLINE void ev_unloop (EV_P_ int how ) { ev_break (EV_A_ how ); } EV_INLINE void ev_default_destroy (void) { ev_loop_destroy (EV_DEFAULT); } EV_INLINE void ev_default_fork (void) { ev_loop_fork (EV_DEFAULT); } #if EV_FEATURE_API EV_INLINE unsigned int ev_loop_count (EV_P) { return ev_iteration (EV_A); } EV_INLINE unsigned int ev_loop_depth (EV_P) { return ev_depth (EV_A); } EV_INLINE void ev_loop_verify (EV_P) { ev_verify (EV_A); } #endif #endif #else typedef struct ev_loop ev_loop; #endif #endif EV_CPP(}) #endif nio4r-2.7.3/ext/libev/LICENSE0000644000004100000410000000400714632135320015514 0ustar www-datawww-dataAll files in libev are Copyright (c)2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Alternatively, the contents of this package may be used under the terms of the GNU General Public License ("GPL") version 2 or any later version, in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this package only under the terms of the GPL and not to allow others to use your version of this file under the BSD license, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL in this and the other files of this package. If you do not delete the provisions above, a recipient may use your version of this file under either the BSD or the GPL. nio4r-2.7.3/ext/libev/README0000644000004100000410000000503614632135320015372 0ustar www-datawww-datalibev is a high-performance event loop/event model with lots of features. (see benchmark at http://libev.schmorp.de/bench.html) ABOUT Homepage: http://software.schmorp.de/pkg/libev Mailinglist: libev@lists.schmorp.de http://lists.schmorp.de/cgi-bin/mailman/listinfo/libev Library Documentation: http://pod.tst.eu/http://cvs.schmorp.de/libev/ev.pod Libev is modelled (very loosely) after libevent and the Event perl module, but is faster, scales better and is more correct, and also more featureful. And also smaller. Yay. Some of the specialties of libev not commonly found elsewhere are: - extensive and detailed, readable documentation (not doxygen garbage). - fully supports fork, can detect fork in various ways and automatically re-arms kernel mechanisms that do not support fork. - highly optimised select, poll, linux epoll, linux aio, bsd kqueue and solaris event ports backends. - filesystem object (path) watching (with optional linux inotify support). - wallclock-based times (using absolute time, cron-like). - relative timers/timeouts (handle time jumps). - fast intra-thread communication between multiple event loops (with optional fast linux eventfd backend). - extremely easy to embed (fully documented, no dependencies, autoconf supported but optional). - very small codebase, no bloated library, simple code. - fully extensible by being able to plug into the event loop, integrate other event loops, integrate other event loop users. - very little memory use (small watchers, small event loop data). - optional C++ interface allowing method and function callbacks at no extra memory or runtime overhead. - optional Perl interface with similar characteristics (capable of running Glib/Gtk2 on libev). - support for other languages (multiple C++ interfaces, D, Ruby, Python) available from third-parties.
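To give a flavour of the API, here is a minimal usage sketch (closely
following the canonical example from the library documentation linked
above): it arms a one-shot 5.5 second timer on the default loop and
leaves the loop once it fires.

   #include <ev.h>
   #include <stdio.h>

   static void
   timeout_cb (EV_P_ ev_timer *w, int revents)
   {
     puts ("timeout");
     ev_break (EV_A_ EVBREAK_ONE); /* leave ev_run */
   }

   int
   main (void)
   {
     struct ev_loop *loop = EV_DEFAULT; /* the default event loop */
     ev_timer timeout_watcher;

     /* initialise and start a timer firing after 5.5 seconds, no repeat */
     ev_timer_init (&timeout_watcher, timeout_cb, 5.5, 0.);
     ev_timer_start (loop, &timeout_watcher);

     ev_run (loop, 0); /* blocks until the only active watcher fires */
     return 0;
   }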
Examples of programs that embed libev: the EV perl module, node.js, auditd, rxvt-unicode, gvpe (GNU Virtual Private Ethernet), the Deliantra MMORPG server (http://www.deliantra.net/), Rubinius (a next-generation Ruby VM), the Ebb web server, the Rev event toolkit. CONTRIBUTORS libev was written and designed by Marc Lehmann and Emanuele Giaquinta. The following people sent in patches or made other noteworthy contributions to the design (for minor patches, see the Changes file. If I forgot to include you, please shout at me, it was an accident): W.C.A. Wijngaards Christopher Layne Chris Brody nio4r-2.7.3/ext/libev/ev_port.c0000644000004100000410000001460014632135320016331 0ustar www-datawww-data/* * libev solaris event port backend * * Copyright (c) 2007,2008,2009,2010,2011,2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ /* useful reading: * * http://bugs.opensolaris.org/view_bug.do?bug_id=6268715 (random results) * http://bugs.opensolaris.org/view_bug.do?bug_id=6455223 (just totally broken) * http://bugs.opensolaris.org/view_bug.do?bug_id=6873782 (manpage ETIME) * http://bugs.opensolaris.org/view_bug.do?bug_id=6874410 (implementation ETIME) * http://www.mail-archive.com/networking-discuss@opensolaris.org/msg11898.html ETIME vs. 
nget * http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/lib/libc/port/gen/event_port.c (libc) * http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/fs/portfs/port.c#1325 (kernel) */ #include <sys/types.h> #include <sys/time.h> #include <time.h> #include <poll.h> #include <port.h> #include <string.h> inline_speed void port_associate_and_check (EV_P_ int fd, int ev) { if (0 > port_associate ( backend_fd, PORT_SOURCE_FD, fd, (ev & EV_READ ? POLLIN : 0) | (ev & EV_WRITE ? POLLOUT : 0), 0 ) ) { if (errno == EBADFD) { assert (("libev: port_associate found invalid fd", errno != EBADFD)); fd_kill (EV_A_ fd); } else ev_syserr ("(libev) port_associate"); } } static void port_modify (EV_P_ int fd, int oev, int nev) { /* we need to reassociate no matter what, as closes are * once more silently being discarded. */ if (!nev) { if (oev) port_dissociate (backend_fd, PORT_SOURCE_FD, fd); } else port_associate_and_check (EV_A_ fd, nev); } static void port_poll (EV_P_ ev_tstamp timeout) { int res, i; struct timespec ts; uint_t nget = 1; /* we initialise this to something we will skip in the loop, as */ /* port_getn can return with nget unchanged, but no indication */ /* whether it was the original value or has been updated :/ */ port_events [0].portev_source = 0; EV_RELEASE_CB; EV_TS_SET (ts, timeout); res = port_getn (backend_fd, port_events, port_eventmax, &nget, &ts); EV_ACQUIRE_CB; /* port_getn may or may not set nget on error */ /* so we rely on port_events [0].portev_source not being updated */ if (res == -1 && errno != ETIME && errno != EINTR) ev_syserr ("(libev) port_getn (see http://bugs.opensolaris.org/view_bug.do?bug_id=6268715, try LIBEV_FLAGS=3 env variable)"); for (i = 0; i < nget; ++i) { if (port_events [i].portev_source == PORT_SOURCE_FD) { int fd = port_events [i].portev_object; fd_event ( EV_A_ fd, (port_events [i].portev_events & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) | (port_events [i].portev_events & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) ); fd_change (EV_A_ fd, EV__IOFDSET); } } if (ecb_expect_false (nget == port_eventmax)) { ev_free (port_events); port_eventmax = array_nextsize (sizeof (port_event_t), port_eventmax, port_eventmax + 1); port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax); } } inline_size int port_init (EV_P_ int flags) { /* Initialize the kernel queue */ if ((backend_fd = port_create ()) < 0) return 0; assert (("libev: PORT_SOURCE_FD must not be zero", PORT_SOURCE_FD)); fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ /* if my reading of the opensolaris kernel sources is correct, then * opensolaris does something very stupid: it checks if the time has already * elapsed and doesn't round up if that is the case, otherwise it DOES round * up. Since we can't know what the case is, we need to guess by using a * "large enough" timeout. Normally, 1e-9 would be correct.
*/ backend_mintime = EV_TS_CONST (1e-3); /* needed to compensate for port_getn returning early */ backend_modify = port_modify; backend_poll = port_poll; port_eventmax = 64; /* initial number of events receivable per poll */ port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax); return EVBACKEND_PORT; } inline_size void port_destroy (EV_P) { ev_free (port_events); } inline_size void port_fork (EV_P) { close (backend_fd); while ((backend_fd = port_create ()) < 0) ev_syserr ("(libev) port"); fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* re-register interest in fds */ fd_rearm_all (EV_A); } nio4r-2.7.3/ext/libev/ev.c0000644000004100000410000045510714632135320015300 0ustar www-datawww-data/* * libev event processing core, watcher management * * Copyright (c) 2007-2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ /* ########## NIO4R PATCHERY HO! 
########## */ #include "ruby.h" #include "ruby/thread.h" #ifdef __APPLE__ #include #endif /* ######################################## */ /* this big block deduces configuration from config.h */ #ifndef EV_STANDALONE # ifdef EV_CONFIG_H # include EV_CONFIG_H # else # include "config.h" # endif # if HAVE_FLOOR # ifndef EV_USE_FLOOR # define EV_USE_FLOOR 1 # endif # endif # if HAVE_CLOCK_SYSCALL # ifndef EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 1 # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 1 # endif # endif # elif !defined EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 0 # endif # if HAVE_CLOCK_GETTIME # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 1 # endif # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif # else # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 0 # endif # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif # endif # if HAVE_NANOSLEEP # ifndef EV_USE_NANOSLEEP # define EV_USE_NANOSLEEP EV_FEATURE_OS # endif # else # undef EV_USE_NANOSLEEP # define EV_USE_NANOSLEEP 0 # endif # if HAVE_SELECT && HAVE_SYS_SELECT_H # ifndef EV_USE_SELECT # define EV_USE_SELECT EV_FEATURE_BACKENDS # endif # else # undef EV_USE_SELECT # define EV_USE_SELECT 0 # endif # if HAVE_POLL && HAVE_POLL_H # ifndef EV_USE_POLL # define EV_USE_POLL EV_FEATURE_BACKENDS # endif # else # undef EV_USE_POLL # define EV_USE_POLL 0 # endif # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H # ifndef EV_USE_EPOLL # define EV_USE_EPOLL EV_FEATURE_BACKENDS # endif # else # undef EV_USE_EPOLL # define EV_USE_EPOLL 0 # endif # if HAVE_LINUX_AIO_ABI_H # ifndef EV_USE_LINUXAIO # define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */ # endif # else # undef EV_USE_LINUXAIO # define EV_USE_LINUXAIO 0 # endif # if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T # ifndef EV_USE_IOURING # define EV_USE_IOURING EV_FEATURE_BACKENDS # endif # else # undef EV_USE_IOURING # define EV_USE_IOURING 0 # endif # if HAVE_KQUEUE && HAVE_SYS_EVENT_H # ifndef EV_USE_KQUEUE # define EV_USE_KQUEUE EV_FEATURE_BACKENDS # endif # else # undef EV_USE_KQUEUE # define EV_USE_KQUEUE 0 # endif # if HAVE_PORT_H && HAVE_PORT_CREATE # ifndef EV_USE_PORT # define EV_USE_PORT EV_FEATURE_BACKENDS # endif # else # undef EV_USE_PORT # define EV_USE_PORT 0 # endif # if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H # ifndef EV_USE_INOTIFY # define EV_USE_INOTIFY EV_FEATURE_OS # endif # else # undef EV_USE_INOTIFY # define EV_USE_INOTIFY 0 # endif # if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H # ifndef EV_USE_SIGNALFD # define EV_USE_SIGNALFD EV_FEATURE_OS # endif # else # undef EV_USE_SIGNALFD # define EV_USE_SIGNALFD 0 # endif # if HAVE_EVENTFD # ifndef EV_USE_EVENTFD # define EV_USE_EVENTFD EV_FEATURE_OS # endif # else # undef EV_USE_EVENTFD # define EV_USE_EVENTFD 0 # endif # if HAVE_SYS_TIMERFD_H # ifndef EV_USE_TIMERFD # define EV_USE_TIMERFD EV_FEATURE_OS # endif # else # undef EV_USE_TIMERFD # define EV_USE_TIMERFD 0 # endif #endif /* OS X, in its infinite idiocy, actually HARDCODES * a limit of 1024 into their select. Where people have brains, * OS X engineers apparently have a vacuum. Or maybe they were * ordered to have a vacuum, or they do anything for money. * This might help. Or not. * Note that this must be defined early, as other include files * will rely on this define as well. 
*/ #define _DARWIN_UNLIMITED_SELECT 1 #include <stdlib.h> #include <string.h> #include <fcntl.h> #include <stddef.h> #include <stdio.h> #include <assert.h> #include <errno.h> #include <sys/types.h> #include <time.h> #include <limits.h> #include <signal.h> #ifdef EV_H # include EV_H #else # include "ev.h" #endif #if EV_NO_THREADS # undef EV_NO_SMP # define EV_NO_SMP 1 # undef ECB_NO_THREADS # define ECB_NO_THREADS 1 #endif #if EV_NO_SMP # undef EV_NO_SMP # define ECB_NO_SMP 1 #endif #ifndef _WIN32 # include <sys/time.h> # include <sys/wait.h> # include <unistd.h> #else # include <io.h> # define WIN32_LEAN_AND_MEAN # include <winsock2.h> # include <windows.h> # ifndef EV_SELECT_IS_WINSOCKET # define EV_SELECT_IS_WINSOCKET 1 # endif # undef EV_AVOID_STDIO #endif /* this block tries to deduce configuration from header-defined symbols and defaults */ /* try to deduce the maximum number of signals on this platform */ #if defined EV_NSIG /* use what's provided */ #elif defined NSIG # define EV_NSIG (NSIG) #elif defined _NSIG # define EV_NSIG (_NSIG) #elif defined SIGMAX # define EV_NSIG (SIGMAX+1) #elif defined SIG_MAX # define EV_NSIG (SIG_MAX+1) #elif defined _SIG_MAX # define EV_NSIG (_SIG_MAX+1) #elif defined MAXSIG # define EV_NSIG (MAXSIG+1) #elif defined MAX_SIG # define EV_NSIG (MAX_SIG+1) #elif defined SIGARRAYSIZE # define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */ #elif defined _sys_nsig # define EV_NSIG (_sys_nsig) /* Solaris 2.5 */ #else # define EV_NSIG (8 * sizeof (sigset_t) + 1) #endif #ifndef EV_USE_FLOOR # define EV_USE_FLOOR 0 #endif #ifndef EV_USE_CLOCK_SYSCALL # if __linux && __GLIBC__ == 2 && __GLIBC_MINOR__ < 17 # define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS # else # define EV_USE_CLOCK_SYSCALL 0 # endif #endif #if !(_POSIX_TIMERS > 0) # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 0 # endif # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif #endif #ifndef EV_USE_MONOTONIC # if defined _POSIX_MONOTONIC_CLOCK && _POSIX_MONOTONIC_CLOCK >= 0 # define EV_USE_MONOTONIC EV_FEATURE_OS # else # define EV_USE_MONOTONIC 0 # endif #endif #ifndef EV_USE_REALTIME # define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL #endif #ifndef EV_USE_NANOSLEEP # if _POSIX_C_SOURCE >= 199309L # define EV_USE_NANOSLEEP EV_FEATURE_OS # else # define EV_USE_NANOSLEEP 0 # endif #endif #ifndef EV_USE_SELECT # define EV_USE_SELECT EV_FEATURE_BACKENDS #endif #ifndef EV_USE_POLL # ifdef _WIN32 # define EV_USE_POLL 0 # else # define EV_USE_POLL EV_FEATURE_BACKENDS # endif #endif #ifndef EV_USE_EPOLL # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) # define EV_USE_EPOLL EV_FEATURE_BACKENDS # else # define EV_USE_EPOLL 0 # endif #endif #ifndef EV_USE_KQUEUE # define EV_USE_KQUEUE 0 #endif #ifndef EV_USE_PORT # define EV_USE_PORT 0 #endif #ifndef EV_USE_LINUXAIO # define EV_USE_LINUXAIO 0 #endif #ifndef EV_USE_IOURING # define EV_USE_IOURING 0 #endif #ifndef EV_USE_INOTIFY # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) # define EV_USE_INOTIFY EV_FEATURE_OS # else # define EV_USE_INOTIFY 0 # endif #endif #ifndef EV_PID_HASHSIZE # define EV_PID_HASHSIZE EV_FEATURE_DATA ? 16 : 1 #endif #ifndef EV_INOTIFY_HASHSIZE # define EV_INOTIFY_HASHSIZE EV_FEATURE_DATA ?
16 : 1 #endif #ifndef EV_USE_EVENTFD # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) # define EV_USE_EVENTFD EV_FEATURE_OS # else # define EV_USE_EVENTFD 0 # endif #endif #ifndef EV_USE_SIGNALFD # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) # define EV_USE_SIGNALFD EV_FEATURE_OS # else # define EV_USE_SIGNALFD 0 # endif #endif #ifndef EV_USE_TIMERFD # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8)) # define EV_USE_TIMERFD EV_FEATURE_OS # else # define EV_USE_TIMERFD 0 # endif #endif #if 0 /* debugging */ # define EV_VERIFY 3 # define EV_USE_4HEAP 1 # define EV_HEAP_CACHE_AT 1 #endif #ifndef EV_VERIFY # define EV_VERIFY (EV_FEATURE_API ? 1 : 0) #endif #ifndef EV_USE_4HEAP # define EV_USE_4HEAP EV_FEATURE_DATA #endif #ifndef EV_HEAP_CACHE_AT # define EV_HEAP_CACHE_AT EV_FEATURE_DATA #endif #ifdef __ANDROID__ /* supposedly, android doesn't typedef fd_mask */ # undef EV_USE_SELECT # define EV_USE_SELECT 0 /* supposedly, we need to include syscall.h, not sys/syscall.h, so just disable */ # undef EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 0 #endif /* aix's poll.h seems to cause lots of trouble */ #ifdef _AIX /* AIX has a completely broken poll.h header */ # undef EV_USE_POLL # define EV_USE_POLL 0 #endif /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */ /* which makes programs even slower. might work on other unices, too. */ #if EV_USE_CLOCK_SYSCALL # include <sys/syscall.h> # ifdef SYS_clock_gettime # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts)) # undef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 1 # define EV_NEED_SYSCALL 1 # else # undef EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 0 # endif #endif /* this block fixes any misconfiguration where we know we run into trouble otherwise */ #ifndef CLOCK_MONOTONIC # undef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 0 #endif #ifndef CLOCK_REALTIME # undef EV_USE_REALTIME # define EV_USE_REALTIME 0 #endif #if !EV_STAT_ENABLE # undef EV_USE_INOTIFY # define EV_USE_INOTIFY 0 #endif #if !EV_USE_NANOSLEEP /* hp-ux has it in sys/time.h, which we unconditionally include above */ # if !defined _WIN32 && !defined __hpux # include <sys/select.h> # endif #endif #if EV_USE_LINUXAIO # include <sys/syscall.h> # if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */ # define EV_NEED_SYSCALL 1 # else # undef EV_USE_LINUXAIO # define EV_USE_LINUXAIO 0 # endif #endif #if EV_USE_IOURING # include <sys/syscall.h> # if !SYS_io_uring_setup && __linux && !__alpha # define SYS_io_uring_setup 425 # define SYS_io_uring_enter 426 # define SYS_io_uring_register 427 # endif # if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */ # define EV_NEED_SYSCALL 1 # else # undef EV_USE_IOURING # define EV_USE_IOURING 0 # endif #endif #if EV_USE_INOTIFY # include <sys/statfs.h> # include <sys/inotify.h> /* some very old inotify.h headers don't have IN_DONT_FOLLOW */ # ifndef IN_DONT_FOLLOW # undef EV_USE_INOTIFY # define EV_USE_INOTIFY 0 # endif #endif #if EV_USE_EVENTFD /* our minimum requirement is glibc 2.7 which has the stub, but not the full header */ # include <stdint.h> # ifndef EFD_NONBLOCK # define EFD_NONBLOCK O_NONBLOCK # endif # ifndef EFD_CLOEXEC # ifdef O_CLOEXEC # define EFD_CLOEXEC O_CLOEXEC # else # define EFD_CLOEXEC 02000000 # endif # endif EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags); #endif #if EV_USE_SIGNALFD /* our minimum requirement is glibc 2.7 which has the stub, but not the full header */ # include <stdint.h> # ifndef SFD_NONBLOCK # define
SFD_NONBLOCK O_NONBLOCK # endif # ifndef SFD_CLOEXEC # ifdef O_CLOEXEC # define SFD_CLOEXEC O_CLOEXEC # else # define SFD_CLOEXEC 02000000 # endif # endif EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags); struct signalfd_siginfo { uint32_t ssi_signo; char pad[128 - sizeof (uint32_t)]; }; #endif /* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */ #if EV_USE_TIMERFD # include <sys/timerfd.h> /* timerfd is only used for periodics */ # if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE # undef EV_USE_TIMERFD # define EV_USE_TIMERFD 0 # endif #endif /*****************************************************************************/ #if EV_VERIFY >= 3 # define EV_FREQUENT_CHECK ev_verify (EV_A) #else # define EV_FREQUENT_CHECK do { } while (0) #endif /* * This is used to work around floating point rounding problems. * This value is good at least till the year 4000. */ #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ #define MAX_BLOCKTIME2 1500001.07 /* same, but when timerfd is used to detect jumps, also safe delay to not overflow */ /* find a portable timestamp that is "always" in the future but fits into time_t. * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t, * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */ #define EV_TSTAMP_HUGE \ (sizeof (time_t) >= 8 ? 10000000000000. \ : 0 < (time_t)4294967295 ? 4294967295. \ : 2147483647.) \ #ifndef EV_TS_CONST # define EV_TS_CONST(nv) nv # define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999 # define EV_TS_FROM_USEC(us) us * 1e-6 # define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) # define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) # define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6) # define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9) #endif /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ /* ECB.H BEGIN */ /* * libecb - http://software.schmorp.de/pkg/libecb * * Copyright (©) 2009-2015,2018-2020 Marc Alexander Lehmann * Copyright (©) 2011 Emanuele Giaquinta * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifndef ECB_H #define ECB_H /* 16 bits major, 16 bits minor */ #define ECB_VERSION 0x00010008 #include <string.h> /* for memcpy */ #if defined (_WIN32) && !defined (__MINGW32__) typedef signed char int8_t; typedef unsigned char uint8_t; typedef signed char int_fast8_t; typedef unsigned char uint_fast8_t; typedef signed short int16_t; typedef unsigned short uint16_t; typedef signed int int_fast16_t; typedef unsigned int uint_fast16_t; typedef signed int int32_t; typedef unsigned int uint32_t; typedef signed int int_fast32_t; typedef unsigned int uint_fast32_t; #if __GNUC__ typedef signed long long int64_t; typedef unsigned long long uint64_t; #else /* _MSC_VER || __BORLANDC__ */ typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; #endif typedef int64_t int_fast64_t; typedef uint64_t uint_fast64_t; #ifdef _WIN64 #define ECB_PTRSIZE 8 typedef uint64_t uintptr_t; typedef int64_t intptr_t; #else #define ECB_PTRSIZE 4 typedef uint32_t uintptr_t; typedef int32_t intptr_t; #endif #else #include <inttypes.h> #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU #define ECB_PTRSIZE 8 #else #define ECB_PTRSIZE 4 #endif #endif #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__) #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64) #ifndef ECB_OPTIMIZE_SIZE #if __OPTIMIZE_SIZE__ #define ECB_OPTIMIZE_SIZE 1 #else #define ECB_OPTIMIZE_SIZE 0 #endif #endif /* work around x32 idiocy by defining proper macros */ #if ECB_GCC_AMD64 || ECB_MSVC_AMD64 #if _ILP32 #define ECB_AMD64_X32 1 #else #define ECB_AMD64 1 #endif #endif /* many compilers define _GNUC_ to some versions but then only implement * what their idiot authors think are the "more important" extensions, * causing enormous grief in return for some better fake benchmark numbers. * or so. * we try to detect these and simply assume they are not gcc - if they have * an issue with that they should have done it right in the first place. 
*/ #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__ #define ECB_GCC_VERSION(major,minor) 0 #else #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) #endif #define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor))) #if __clang__ && defined __has_builtin #define ECB_CLANG_BUILTIN(x) __has_builtin (x) #else #define ECB_CLANG_BUILTIN(x) 0 #endif #if __clang__ && defined __has_extension #define ECB_CLANG_EXTENSION(x) __has_extension (x) #else #define ECB_CLANG_EXTENSION(x) 0 #endif #define ECB_CPP (__cplusplus+0) #define ECB_CPP11 (__cplusplus >= 201103L) #define ECB_CPP14 (__cplusplus >= 201402L) #define ECB_CPP17 (__cplusplus >= 201703L) #if ECB_CPP #define ECB_C 0 #define ECB_STDC_VERSION 0 #else #define ECB_C 1 #define ECB_STDC_VERSION __STDC_VERSION__ #endif #define ECB_C99 (ECB_STDC_VERSION >= 199901L) #define ECB_C11 (ECB_STDC_VERSION >= 201112L) #define ECB_C17 (ECB_STDC_VERSION >= 201710L) #if ECB_CPP #define ECB_EXTERN_C extern "C" #define ECB_EXTERN_C_BEG ECB_EXTERN_C { #define ECB_EXTERN_C_END } #else #define ECB_EXTERN_C extern #define ECB_EXTERN_C_BEG #define ECB_EXTERN_C_END #endif /*****************************************************************************/ /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ #if ECB_NO_THREADS #define ECB_NO_SMP 1 #endif #if ECB_NO_SMP #define ECB_MEMORY_FENCE do { } while (0) #endif /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */ #if __xlC__ && ECB_CPP #include <builtins.h> #endif #if 1400 <= _MSC_VER #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */ #endif #ifndef ECB_MEMORY_FENCE #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory") #if __i386 || __i386__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory") #elif ECB_GCC_AMD64 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory") #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") #elif defined __ARM_ARCH_2__ \ || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \ || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \ || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \ || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \ || defined __ARM_ARCH_5TEJ__ /* should not need any, unless running old code on newer cpu - arm doesn't support that */ #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \ || defined __ARM_ARCH_6T2__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__ #define
ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") #elif __aarch64__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory") #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8) #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory") #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") #elif defined __s390__ || defined __s390x__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") #elif defined __mips__ /* GNU/Linux emulates sync on mips1 architectures, so we force its use */ /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */ #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory") #elif defined __alpha__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") #elif defined __hppa__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") #elif defined __ia64__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory") #elif defined __m68k__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory") #elif defined __m88k__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory") #elif defined __sh__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory") #endif #endif #endif #ifndef ECB_MEMORY_FENCE #if ECB_GCC_VERSION(4,7) /* see comment below (stdatomic.h) about the C11 memory model. */ #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST) #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE) #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE) #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED) #elif ECB_CLANG_EXTENSION(c_atomic) /* see comment below (stdatomic.h) about the C11 memory model. */ #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST) #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE) #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE) #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED) #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ #define ECB_MEMORY_FENCE __sync_synchronize () #elif _MSC_VER >= 1500 /* VC++ 2008 */ /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */ #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier() #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */ #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier() #elif _MSC_VER >= 1400 /* VC++ 2005 */ #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) #define ECB_MEMORY_FENCE _ReadWriteBarrier () #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () #elif defined _WIN32 #include <WinNT.h> #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... 
scary */ #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 #include <mbarrier.h> #define ECB_MEMORY_FENCE __machine_rw_barrier () #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier () #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier () #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier () #elif __xlC__ #define ECB_MEMORY_FENCE __sync () #endif #endif #ifndef ECB_MEMORY_FENCE #if ECB_C11 && !defined __STDC_NO_ATOMICS__ /* we assume that these memory fences work on all variables/all memory accesses, */ /* not just C11 atomics and atomic accesses */ #include <stdatomic.h> #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst) #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire) #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release) #endif #endif #ifndef ECB_MEMORY_FENCE #if !ECB_AVOID_PTHREADS /* * if you get undefined symbol references to pthread_mutex_lock, * or failure to find pthread.h, then you should implement * the ECB_MEMORY_FENCE operations for your cpu/compiler * OR provide pthread.h and link against the posix thread library * of your system. */ #include <pthread.h> #define ECB_NEEDS_PTHREADS 1 #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1 static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER; #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0) #endif #endif #if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE #endif #if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE #endif #if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */ #endif
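/* Editor's note: a minimal sketch (not part of libev) of how the fence macros
 * above are meant to pair up - a release fence before publishing a flag, an
 * acquire fence after observing it. Names are illustrative only. */
#if 0
static int ecb_example_data;
static volatile int ecb_example_flag;

static void
ecb_example_producer (void)
{
  ecb_example_data = 42;
  ECB_MEMORY_FENCE_RELEASE; /* make the store to data visible before the flag */
  ecb_example_flag = 1;
}

static void
ecb_example_consumer (void)
{
  while (!ecb_example_flag)
    ;
  ECB_MEMORY_FENCE_ACQUIRE; /* ensure data is read only after the flag */
  /* ecb_example_data is now guaranteed to read as 42 */
}
#endif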
/*****************************************************************************/ #if ECB_CPP #define ecb_inline static inline #elif ECB_GCC_VERSION(2,5) #define ecb_inline static __inline__ #elif ECB_C99 #define ecb_inline static inline #else #define ecb_inline static #endif #if ECB_GCC_VERSION(3,3) #define ecb_restrict __restrict__ #elif ECB_C99 #define ecb_restrict restrict #else #define ecb_restrict #endif typedef int ecb_bool; #define ECB_CONCAT_(a, b) a ## b #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b) #define ECB_STRINGIFY_(a) # a #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a) #define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr)) #define ecb_function_ ecb_inline #if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8) #define ecb_attribute(attrlist) __attribute__ (attrlist) #else #define ecb_attribute(attrlist) #endif #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p) #define ecb_is_constant(expr) __builtin_constant_p (expr) #else /* possible C11 impl for integral types typedef struct ecb_is_constant_struct ecb_is_constant_struct; #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */ #define ecb_is_constant(expr) 0 #endif #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect) #define ecb_expect(expr,value) __builtin_expect ((expr),(value)) #else #define ecb_expect(expr,value) (expr) #endif #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch) #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality) #else #define ecb_prefetch(addr,rw,locality) #endif /* no emulation for ecb_decltype */ #if ECB_CPP11 // older implementations might have problems with decltype(x)::type, work around it template<typename T> struct ecb_decltype_t { typedef T type; }; #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type #elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8) #define ecb_decltype(x) __typeof__ (x) #endif #if _MSC_VER >= 1300 #define ecb_deprecated __declspec (deprecated) #else #define ecb_deprecated ecb_attribute ((__deprecated__)) #endif #if _MSC_VER >= 1500 #define ecb_deprecated_message(msg) __declspec (deprecated (msg)) #elif ECB_GCC_VERSION(4,5) #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg))) #else #define ecb_deprecated_message(msg) ecb_deprecated #endif #if _MSC_VER >= 1400 #define ecb_noinline __declspec (noinline) #else #define ecb_noinline ecb_attribute ((__noinline__)) #endif #define ecb_unused ecb_attribute ((__unused__)) #define ecb_const ecb_attribute ((__const__)) #define ecb_pure ecb_attribute ((__pure__)) #if ECB_C11 || __IBMC_NORETURN /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */ #define ecb_noreturn _Noreturn #elif ECB_CPP11 #define ecb_noreturn [[noreturn]] #elif _MSC_VER >= 1200 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */ #define ecb_noreturn __declspec (noreturn) #else #define ecb_noreturn ecb_attribute ((__noreturn__)) #endif #if ECB_GCC_VERSION(4,3) #define ecb_artificial ecb_attribute ((__artificial__)) #define ecb_hot ecb_attribute ((__hot__)) #define ecb_cold ecb_attribute ((__cold__)) #else #define ecb_artificial #define ecb_hot #define ecb_cold #endif /* put around conditional expressions if you are very sure that the */ /* expression is mostly true or mostly false. note that these return */ /* booleans, not the expression. 
*/ #define ecb_expect_false(expr) ecb_expect (!!(expr), 0) #define ecb_expect_true(expr) ecb_expect (!!(expr), 1) /* for compatibility to the rest of the world */ #define ecb_likely(expr) ecb_expect_true (expr) #define ecb_unlikely(expr) ecb_expect_false (expr) /* count trailing zero bits and count # of one bits */ #if ECB_GCC_VERSION(3,4) \ || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \ && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \ && ECB_CLANG_BUILTIN(__builtin_popcount)) /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */ #define ecb_ld32(x) (__builtin_clz (x) ^ 31) #define ecb_ld64(x) (__builtin_clzll (x) ^ 63) #define ecb_ctz32(x) __builtin_ctz (x) #define ecb_ctz64(x) __builtin_ctzll (x) #define ecb_popcount32(x) __builtin_popcount (x) /* no popcountll */ #else ecb_function_ ecb_const int ecb_ctz32 (uint32_t x); ecb_function_ ecb_const int ecb_ctz32 (uint32_t x) { #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanForward (&r, x); return (int)r; #else int r = 0; x &= ~x + 1; /* this isolates the lowest bit */ #if ECB_branchless_on_i386 r += !!(x & 0xaaaaaaaa) << 0; r += !!(x & 0xcccccccc) << 1; r += !!(x & 0xf0f0f0f0) << 2; r += !!(x & 0xff00ff00) << 3; r += !!(x & 0xffff0000) << 4; #else if (x & 0xaaaaaaaa) r += 1; if (x & 0xcccccccc) r += 2; if (x & 0xf0f0f0f0) r += 4; if (x & 0xff00ff00) r += 8; if (x & 0xffff0000) r += 16; #endif return r; #endif } ecb_function_ ecb_const int ecb_ctz64 (uint64_t x); ecb_function_ ecb_const int ecb_ctz64 (uint64_t x) { #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanForward64 (&r, x); return (int)r; #else int shift = x & 0xffffffff ? 0 : 32; return ecb_ctz32 (x >> shift) + shift; #endif } ecb_function_ ecb_const int ecb_popcount32 (uint32_t x); ecb_function_ ecb_const int ecb_popcount32 (uint32_t x) { x -= (x >> 1) & 0x55555555; x = ((x >> 2) & 0x33333333) + (x & 0x33333333); x = ((x >> 4) + x) & 0x0f0f0f0f; x *= 0x01010101; return x >> 24; } ecb_function_ ecb_const int ecb_ld32 (uint32_t x); ecb_function_ ecb_const int ecb_ld32 (uint32_t x) { #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanReverse (&r, x); return (int)r; #else int r = 0; if (x >> 16) { x >>= 16; r += 16; } if (x >> 8) { x >>= 8; r += 8; } if (x >> 4) { x >>= 4; r += 4; } if (x >> 2) { x >>= 2; r += 2; } if (x >> 1) { r += 1; } return r; #endif } ecb_function_ ecb_const int ecb_ld64 (uint64_t x); ecb_function_ ecb_const int ecb_ld64 (uint64_t x) { #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanReverse64 (&r, x); return (int)r; #else int r = 0; if (x >> 32) { x >>= 32; r += 32; } return r + ecb_ld32 (x); #endif } #endif ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x); ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x); ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); } ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x); ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x) { return ( (x * 0x0802U & 0x22110U) | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16; } ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x); ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x) { x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1); x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2); x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4); x 
= ( x >> 8 ) | ( x << 8); return x; } ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x); ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x) { x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1); x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2); x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4); x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8); x = ( x >> 16 ) | ( x << 16); return x; } /* popcount64 is only available on 64 bit cpus as gcc builtin */ /* so for this version we are lazy */ ecb_function_ ecb_const int ecb_popcount64 (uint64_t x); ecb_function_ ecb_const int ecb_popcount64 (uint64_t x) { return ecb_popcount32 (x) + ecb_popcount32 (x >> 32); } ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count); ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count); ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count); ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count); ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count); ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count); ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count); ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count); ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); } ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); } ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); } ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); } ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } #if ECB_CPP inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); } inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); } inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); } inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); } inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); } inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); } inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); } inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); } inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); } inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); } inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); } inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); } inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); } inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); } inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); } inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); } inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); } inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); } inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); } inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, 
count); } inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); } inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); } inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); } inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); } inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); } inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); } inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); } #endif #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64)) #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16) #define ecb_bswap16(x) __builtin_bswap16 (x) #else #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) #endif #define ecb_bswap32(x) __builtin_bswap32 (x) #define ecb_bswap64(x) __builtin_bswap64 (x) #elif _MSC_VER #include #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x))) #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x))) #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x))) #else ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x); ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x) { return ecb_rotl16 (x, 8); } ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x); ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x) { return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16); } ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x); ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x) { return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32); } #endif #if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable) #define ecb_unreachable() __builtin_unreachable () #else /* this seems to work fine, but gcc always emits a warning for it :/ */ ecb_inline ecb_noreturn void ecb_unreachable (void); ecb_inline ecb_noreturn void ecb_unreachable (void) { } #endif /* try to tell the compiler that some condition is definitely true */ #define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0 ecb_inline ecb_const uint32_t ecb_byteorder_helper (void); ecb_inline ecb_const uint32_t ecb_byteorder_helper (void) { /* the union code still generates code under pressure in gcc, */ /* but less than using pointers, and always seems to */ /* successfully return a constant. 
*/ /* the reason why we have this horrible preprocessor mess */ /* is to avoid it in all cases, at least on common architectures */ /* or when using a recent enough gcc version (>= 4.6) */ #if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__) #define ECB_LITTLE_ENDIAN 1 return 0x44332211; #elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \ || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__) #define ECB_BIG_ENDIAN 1 return 0x11223344; #else union { uint8_t c[4]; uint32_t u; } u = { 0x11, 0x22, 0x33, 0x44 }; return u.u; #endif } ecb_inline ecb_const ecb_bool ecb_big_endian (void); ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; } ecb_inline ecb_const ecb_bool ecb_little_endian (void); ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; } /*****************************************************************************/ /* unaligned load/store */ ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; } ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; } ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; } ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; } ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; } ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; } ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; } ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; } ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; } ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); } ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); } ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); } ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); } ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); } ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); } ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; } ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; } ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; } ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; } ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; } ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? 
ecb_bswap64 (v) : v; } ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); } ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); } ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); } ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); } ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); } ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); } ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); } ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); } ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); } #if ECB_CPP inline uint8_t ecb_bswap (uint8_t v) { return v; } inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); } inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); } inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); } template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; } template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; } template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; } template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek<T> (ptr)); } template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek<T> (ptr)); } template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; } template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); } template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); } template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; } template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; } template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; } template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke (ptr, ecb_host_to_be (v)); } template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke (ptr, ecb_host_to_le (v)); } template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); } template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u (ptr, ecb_host_to_be (v)); } template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u (ptr, ecb_host_to_le (v)); } #endif /*****************************************************************************/ #if ECB_GCC_VERSION(3,0) || ECB_C99 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) #else #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) #endif #if ECB_CPP template<typename T> static inline T ecb_div_rd (T val, T div) { return val < 0 ? - ((-val + div - 1) / div) : (val ) / div; } template<typename T> static inline T ecb_div_ru (T val, T div) { return val < 0 ? - ((-val ) / div) : (val + div - 1) / div; } #else #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div)) #define ecb_div_ru(val,div) ((val) < 0 ?
- ((-(val) ) / (div)) : ((val) + (div) - 1) / (div)) #endif #if ecb_cplusplus_does_not_suck /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */ template static inline int ecb_array_length (const T (&arr)[N]) { return N; } #else #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) #endif /*****************************************************************************/ ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x); ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x) { unsigned int s = (x & 0x8000) << (31 - 15); int e = (x >> 10) & 0x001f; unsigned int m = x & 0x03ff; if (ecb_expect_false (e == 31)) /* infinity or NaN */ e = 255 - (127 - 15); else if (ecb_expect_false (!e)) { if (ecb_expect_true (!m)) /* zero, handled by code below by forcing e to 0 */ e = 0 - (127 - 15); else { /* subnormal, renormalise */ unsigned int s = 10 - ecb_ld32 (m); m = (m << s) & 0x3ff; /* mask implicit bit */ e -= s - 1; } } /* e and m now are normalised, or zero, (or inf or nan) */ e += 127 - 15; return s | (e << 23) | (m << (23 - 10)); } ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x); ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x) { unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */ unsigned int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */ unsigned int m = x & 0x007fffff; x &= 0x7fffffff; /* if it's within range of binary16 normals, use fast path */ if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff)) { /* mantissa round-to-even */ m += 0x00000fff + ((m >> (23 - 10)) & 1); /* handle overflow */ if (ecb_expect_false (m >= 0x00800000)) { m >>= 1; e += 1; } return s | (e << 10) | (m >> (23 - 10)); } /* handle large numbers and infinity */ if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000)) return s | 0x7c00; /* handle zero, subnormals and small numbers */ if (ecb_expect_true (x < 0x38800000)) { /* zero */ if (ecb_expect_true (!x)) return s; /* handle subnormals */ /* too small, will be zero */ if (e < (14 - 24)) /* might not be sharp, but is good enough */ return s; m |= 0x00800000; /* make implicit bit explicit */ /* very tricky - we need to round to the nearest e (+10) bit value */ { unsigned int bits = 14 - e; unsigned int half = (1 << (bits - 1)) - 1; unsigned int even = (m >> bits) & 1; /* if this overflows, we will end up with a normalised number */ m = (m + half + even) >> bits; } return s | m; } /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */ m >>= 13; return s | 0x7c00 | m | !m; } /*******************************************************************************/ /* floating point stuff, can be disabled by defining ECB_NO_LIBM */ /* basically, everything uses "ieee pure-endian" floating point numbers */ /* the only noteworthy exception is ancient armle, which uses order 43218765 */ #if 0 \ || __i386 || __i386__ \ || ECB_GCC_AMD64 \ || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \ || defined __s390__ || defined __s390x__ \ || defined __mips__ \ || defined __alpha__ \ || defined __hppa__ \ || defined __ia64__ \ || defined __m68k__ \ || defined __m88k__ \ || defined __sh__ \ || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \ || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \ || defined __aarch64__ #define ECB_STDFP 1 #else #define ECB_STDFP 0 #endif 
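/* Editor's note: an illustration (not part of libev) of how the rounding
 * division/modulo helpers above differ from C's truncating operators for
 * negative operands; the values follow directly from the definitions. */
#if 0
#include <assert.h>

static void
ecb_example_div (void)
{
  assert (ecb_div_rd (-7, 2) == -4); /* rounds towards -inf; C gives -7 / 2 == -3 */
  assert (ecb_div_ru (-7, 2) == -3); /* rounds towards +inf */
  assert (ecb_mod    (-7, 2) ==  1); /* result has the sign of the divisor; C gives -1 */
}
#endif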
#ifndef ECB_NO_LIBM #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */ /* only the oldest of old doesn't have this one. solaris. */ #ifdef INFINITY #define ECB_INFINITY INFINITY #else #define ECB_INFINITY HUGE_VAL #endif #ifdef NAN #define ECB_NAN NAN #else #define ECB_NAN ECB_INFINITY #endif #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L #define ecb_ldexpf(x,e) ldexpf ((x), (e)) #define ecb_frexpf(x,e) frexpf ((x), (e)) #else #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e)) #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e)) #endif /* convert a float to ieee single/binary32 */ ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x); ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x) { uint32_t r; #if ECB_STDFP memcpy (&r, &x, 4); #else /* slow emulation, works for anything but -0 */ uint32_t m; int e; if (x == 0e0f ) return 0x00000000U; if (x > +3.40282346638528860e+38f) return 0x7f800000U; if (x < -3.40282346638528860e+38f) return 0xff800000U; if (x != x ) return 0x7fbfffffU; m = ecb_frexpf (x, &e) * 0x1000000U; r = m & 0x80000000U; if (r) m = -m; if (e <= -126) { m &= 0xffffffU; m >>= (-125 - e); e = -126; } r |= (e + 126) << 23; r |= m & 0x7fffffU; #endif return r; } /* converts an ieee single/binary32 to a float */ ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x); ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x) { float r; #if ECB_STDFP memcpy (&r, &x, 4); #else /* emulation, only works for normals and subnormals and +0 */ int neg = x >> 31; int e = (x >> 23) & 0xffU; x &= 0x7fffffU; if (e) x |= 0x800000U; else e = 1; /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */ r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126); r = neg ? -r : r; #endif return r; } /* convert a double to ieee double/binary64 */ ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x); ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x) { uint64_t r; #if ECB_STDFP memcpy (&r, &x, 8); #else /* slow emulation, works for anything but -0 */ uint64_t m; int e; if (x == 0e0 ) return 0x0000000000000000U; if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U; if (x < -1.79769313486231470e+308) return 0xfff0000000000000U; if (x != x ) return 0x7ff7ffffffffffffU; m = frexp (x, &e) * 0x20000000000000U; r = m & 0x8000000000000000; if (r) m = -m; if (e <= -1022) { m &= 0x1fffffffffffffU; m >>= (-1021 - e); e = -1022; } r |= ((uint64_t)(e + 1022)) << 52; r |= m & 0xfffffffffffffU; #endif return r; } /* converts an ieee double/binary64 to a double */ ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x); ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x) { double r; #if ECB_STDFP memcpy (&r, &x, 8); #else /* emulation, only works for normals and subnormals and +0 */ int neg = x >> 63; int e = (x >> 52) & 0x7ffU; x &= 0xfffffffffffffU; if (e) x |= 0x10000000000000U; else e = 1; /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */ r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022); r = neg ? -r : r; #endif return r; }
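/* Editor's note: a small illustration (not part of libev) of the binary32
 * converters above; on ECB_STDFP targets they are plain bit copies, so the
 * round trip is exact. */
#if 0
static void
ecb_example_binary32 (void)
{
  uint32_t bits = ecb_float_to_binary32 (1.0f); /* 0x3f800000, the IEEE-754 single for 1.0 */
  float    back = ecb_binary32_to_float (bits); /* 1.0f again */
  (void)back;
}
#endif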
/* convert a float to ieee half/binary16 */ ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x); ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x) { return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x)); } /* convert an ieee half/binary16 to float */ ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x); ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x) { return ecb_binary32_to_float (ecb_binary16_to_binary32 (x)); } #endif #endif /* ECB.H END */ #if ECB_MEMORY_FENCE_NEEDS_PTHREADS /* if your architecture doesn't need memory fences, e.g. because it is * single-cpu/core, or if you use libev in a project that doesn't use libev * from multiple threads, then you can define ECB_NO_THREADS when compiling * libev, in which cases the memory fences become nops. * alternatively, you can remove this #error and link against libpthread, * which will then provide the memory fences. */ # error "memory fences not defined for your architecture, please report" #endif #ifndef ECB_MEMORY_FENCE # define ECB_MEMORY_FENCE do { } while (0) # define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE # define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE #endif #define inline_size ecb_inline #if EV_FEATURE_CODE # define inline_speed ecb_inline #else # define inline_speed ecb_noinline static #endif /*****************************************************************************/ /* raw syscall wrappers */ #if EV_NEED_SYSCALL #include <sys/syscall.h> /* * define some syscall wrappers for common architectures * this is mostly for nice looks during debugging, not performance. * our syscalls return < 0, not == -1, on error. which is good * enough for linux aio. * TODO: arm is also common nowadays, maybe even mips and x86 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove... */
#if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE /* the costly errno access probably kills this for size optimisation */ #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \ ({ \ long res; \ register unsigned long r6 __asm__ ("r9" ); \ register unsigned long r5 __asm__ ("r8" ); \ register unsigned long r4 __asm__ ("r10"); \ register unsigned long r3 __asm__ ("rdx"); \ register unsigned long r2 __asm__ ("rsi"); \ register unsigned long r1 __asm__ ("rdi"); \ if (narg >= 6) r6 = (unsigned long)(arg6); \ if (narg >= 5) r5 = (unsigned long)(arg5); \ if (narg >= 4) r4 = (unsigned long)(arg4); \ if (narg >= 3) r3 = (unsigned long)(arg3); \ if (narg >= 2) r2 = (unsigned long)(arg2); \ if (narg >= 1) r1 = (unsigned long)(arg1); \ __asm__ __volatile__ ( \ "syscall\n\t" \ : "=a" (res) \ : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \ : "cc", "r11", "cx", "memory"); \ errno = -res; \ res; \ }) #endif #ifdef ev_syscall #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0) #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0) #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0) #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0) #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0, 0) #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0) #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6) #else #define ev_syscall0(nr) syscall (nr) #define ev_syscall1(nr,arg1) syscall (nr, arg1) #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2) #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3) #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4) #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5) #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6) #endif #endif
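/* Editor's note: a hypothetical use (not part of libev) of the ev_syscallN
 * wrappers above. As the comment before them says, "our syscalls return < 0,
 * not == -1, on error": on the inline-assembly path the negative errno value
 * is returned directly, while the fallback path is plain syscall(2). */
#if 0
static void
ev_example_syscall (void)
{
  struct timespec ts;
  long res = ev_syscall2 (SYS_clock_gettime, CLOCK_MONOTONIC, &ts);

  if (res < 0)
    {
      /* handle error; res is -errno on the inline-assembly path */
    }
}
#endif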
*/ #endif #ifndef EV_FD_TO_WIN32_HANDLE # define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd) #endif #ifndef EV_WIN32_HANDLE_TO_FD # define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0) #endif #ifndef EV_WIN32_CLOSE_FD # define EV_WIN32_CLOSE_FD(fd) close (fd) #endif #ifdef _WIN32 # include "ev_win32.c" #endif /*****************************************************************************/ #if EV_USE_LINUXAIO # include /* probably only needed for aio_context_t */ #endif /* define a suitable floor function (only used by periodics atm) */ #if EV_USE_FLOOR # include # define ev_floor(v) floor (v) #else #include /* a floor() replacement function, should be independent of ev_tstamp type */ ecb_noinline static ev_tstamp ev_floor (ev_tstamp v) { /* the choice of shift factor is not terribly important */ #if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */ const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; #else const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; #endif /* special treatment for negative arguments */ if (ecb_expect_false (v < 0.)) { ev_tstamp f = -ev_floor (-v); return f - (f == v ? 0 : 1); } /* argument too large for an unsigned long? then reduce it */ if (ecb_expect_false (v >= shift)) { ev_tstamp f; if (v == v - 1.) return v; /* very large numbers are assumed to be integer */ f = shift * ev_floor (v * (1. / shift)); return f + ev_floor (v - f); } /* fits into an unsigned long */ return (unsigned long)v; } #endif /*****************************************************************************/ #ifdef __linux # include #endif ecb_noinline ecb_cold static unsigned int ev_linux_version (void) { #ifdef __linux unsigned int v = 0; struct utsname buf; int i; char *p = buf.release; if (uname (&buf)) return 0; for (i = 3+1; --i; ) { unsigned int c = 0; for (;;) { if (*p >= '0' && *p <= '9') c = c * 10 + *p++ - '0'; else { p += *p == '.'; break; } } v = (v << 8) | c; } return v; #else return 0; #endif } /*****************************************************************************/ #if EV_AVOID_STDIO ecb_noinline ecb_cold static void ev_printerr (const char *msg) { write (STDERR_FILENO, msg, strlen (msg)); } #endif static void (*syserr_cb)(const char *msg) EV_NOEXCEPT; ecb_cold void ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT { syserr_cb = cb; } ecb_noinline ecb_cold static void ev_syserr (const char *msg) { if (!msg) msg = "(libev) system error"; if (syserr_cb) syserr_cb (msg); else { #if EV_AVOID_STDIO ev_printerr (msg); ev_printerr (": "); ev_printerr (strerror (errno)); ev_printerr ("\n"); #else perror (msg); #endif abort (); } } static void * ev_realloc_emul (void *ptr, size_t size) EV_NOEXCEPT { /* some systems, notably openbsd and darwin, fail to properly * implement realloc (x, 0) (as required by both ansi c-89 and * the single unix specification, so work around them here. * recently, also (at least) fedora and debian started breaking it, * despite documenting it otherwise. 
/*****************************************************************************/ #if EV_AVOID_STDIO ecb_noinline ecb_cold static void ev_printerr (const char *msg) { write (STDERR_FILENO, msg, strlen (msg)); } #endif static void (*syserr_cb)(const char *msg) EV_NOEXCEPT; ecb_cold void ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT { syserr_cb = cb; } ecb_noinline ecb_cold static void ev_syserr (const char *msg) { if (!msg) msg = "(libev) system error"; if (syserr_cb) syserr_cb (msg); else { #if EV_AVOID_STDIO ev_printerr (msg); ev_printerr (": "); ev_printerr (strerror (errno)); ev_printerr ("\n"); #else perror (msg); #endif abort (); } } static void * ev_realloc_emul (void *ptr, size_t size) EV_NOEXCEPT { /* some systems, notably openbsd and darwin, fail to properly * implement realloc (x, 0) (as required by both ansi c-89 and * the single unix specification), so work around them here. * recently, also (at least) fedora and debian started breaking it, * despite documenting it otherwise. */ if (size) return realloc (ptr, size); free (ptr); return 0; } static void *(*alloc)(void *ptr, size_t size) EV_NOEXCEPT = ev_realloc_emul; ecb_cold void ev_set_allocator (void *(*cb)(void *ptr, size_t size) EV_NOEXCEPT) EV_NOEXCEPT { alloc = cb; } inline_speed void * ev_realloc (void *ptr, size_t size) { ptr = alloc (ptr, size); if (!ptr && size) { #if EV_AVOID_STDIO ev_printerr ("(libev) memory allocation failed, aborting.\n"); #else fprintf (stderr, "(libev) cannot allocate %ld bytes, aborting.", (long)size); #endif abort (); } return ptr; } #define ev_malloc(size) ev_realloc (0, (size)) #define ev_free(ptr) ev_realloc ((ptr), 0) /*****************************************************************************/ /* set in reify when reification needed */ #define EV_ANFD_REIFY 1 /* file descriptor info structure */ typedef struct { WL head; unsigned char events; /* the events watched for */ unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ unsigned char emask; /* some backends store the actual kernel mask in here */ unsigned char eflags; /* flags field for use by backends */ #if EV_USE_EPOLL unsigned int egen; /* generation counter to counter epoll bugs */ #endif #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP SOCKET handle; #endif #if EV_USE_IOCP OVERLAPPED or, ow; #endif } ANFD; /* stores the pending event set for a given watcher */ typedef struct { W w; int events; /* the pending event set for the given watcher */ } ANPENDING; #if EV_USE_INOTIFY /* hash table entry per inotify-id */ typedef struct { WL head; } ANFS; #endif /* Heap Entry */ #if EV_HEAP_CACHE_AT /* a heap element */ typedef struct { ev_tstamp at; WT w; } ANHE; #define ANHE_w(he) (he).w /* access watcher, read-write */ #define ANHE_at(he) (he).at /* access cached at, read-only */ #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */ #else /* a heap element */ typedef WT ANHE; #define ANHE_w(he) (he) #define ANHE_at(he) (he)->at #define ANHE_at_cache(he) #endif #if EV_MULTIPLICITY struct ev_loop { ev_tstamp ev_rt_now; #define ev_rt_now ((loop)->ev_rt_now) #define VAR(name,decl) decl; #include "ev_vars.h" #undef VAR }; #include "ev_wrap.h" static struct ev_loop default_loop_struct; EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */ #else EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */ #define VAR(name,decl) static decl; #include "ev_vars.h" #undef VAR static int ev_default_loop_ptr; #endif #if EV_FEATURE_API # define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A) # define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A) # define EV_INVOKE_PENDING invoke_cb (EV_A) #else # define EV_RELEASE_CB (void)0 # define EV_ACQUIRE_CB (void)0 # define EV_INVOKE_PENDING ev_invoke_pending (EV_A) #endif #define EVBREAK_RECURSE 0x80 /*****************************************************************************/ #ifndef EV_HAVE_EV_TIME ev_tstamp ev_time (void) EV_NOEXCEPT { #if EV_USE_REALTIME if (ecb_expect_true (have_realtime)) { struct timespec ts; clock_gettime (CLOCK_REALTIME, &ts); return EV_TS_GET (ts); } #endif { struct timeval tv; gettimeofday (&tv, 0); return EV_TV_GET (tv); } } #endif inline_size ev_tstamp get_clock (void) { #if EV_USE_MONOTONIC if (ecb_expect_true (have_monotonic)) { struct timespec ts; clock_gettime (CLOCK_MONOTONIC, &ts); return EV_TS_GET (ts); } #endif return ev_time (); }
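/* Editor's note: a sketch (not part of libev) of installing a custom
 * allocator through ev_set_allocator, defined above; the callback must
 * behave like the realloc emulation, i.e. free on size == 0 and return 0.
 * The function name is illustrative. */
#if 0
#include <stdlib.h>

static void *
my_allocator (void *ptr, size_t size) EV_NOEXCEPT
{
  if (size)
    return realloc (ptr, size); /* could also sleep and retry before giving up */

  free (ptr);
  return 0;
}

/* somewhere during program initialisation: */
/* ev_set_allocator (my_allocator); */
#endif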
#if EV_MULTIPLICITY ev_tstamp ev_now (EV_P) EV_NOEXCEPT { return ev_rt_now; } #endif void ev_sleep (ev_tstamp delay) EV_NOEXCEPT { if (delay > EV_TS_CONST (0.)) { #if EV_USE_NANOSLEEP struct timespec ts; EV_TS_SET (ts, delay); nanosleep (&ts, 0); #elif defined _WIN32 /* maybe this should round up, as ms is very low resolution */ /* compared to select (µs) or nanosleep (ns) */ Sleep ((unsigned long)(EV_TS_TO_MSEC (delay))); #else struct timeval tv; /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ /* something not guaranteed by newer posix versions, but guaranteed */ /* by older ones */ EV_TV_SET (tv, delay); select (0, 0, 0, 0, &tv); #endif } } /*****************************************************************************/ #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */ /* find a suitable new size for the given array, */ /* hopefully by rounding to a nice-to-malloc size */ inline_size int array_nextsize (int elem, int cur, int cnt) { int ncur = cur + 1; do ncur <<= 1; while (cnt > ncur); /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */ if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4) { ncur *= elem; ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1); ncur = ncur - sizeof (void *) * 4; ncur /= elem; } return ncur; } ecb_noinline ecb_cold static void * array_realloc (int elem, void *base, int *cur, int cnt) { *cur = array_nextsize (elem, *cur, cnt); return ev_realloc (base, elem * *cur); } #define array_needsize_noinit(base,offset,count) #define array_needsize_zerofill(base,offset,count) \ memset ((void *)(base + offset), 0, sizeof (*(base)) * (count)) #define array_needsize(type,base,cur,cnt,init) \ if (ecb_expect_false ((cnt) > (cur))) \ { \ ecb_unused int ocur_ = (cur); \ (base) = (type *)array_realloc \ (sizeof (type), (base), &(cur), (cnt)); \ init ((base), ocur_, ((cur) - ocur_)); \ } #if 0 #define array_slim(type,stem) \ if (stem ## max < array_roundsize (stem ## cnt >> 2)) \ { \ stem ## max = array_roundsize (stem ## cnt >> 1); \ base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\ fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\ } #endif #define array_free(stem, idx) \ ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0 /*****************************************************************************/ /* dummy callback for pending events */ ecb_noinline static void pendingcb (EV_P_ ev_prepare *w, int revents) { } ecb_noinline void ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT { W w_ = (W)w; int pri = ABSPRI (w_); if (ecb_expect_false (w_->pending)) pendings [pri][w_->pending - 1].events |= revents; else { w_->pending = ++pendingcnt [pri]; array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit); pendings [pri][w_->pending - 1].w = w_; pendings [pri][w_->pending - 1].events = revents; } pendingpri = NUMPRI - 1; } inline_speed void feed_reverse (EV_P_ W w) { array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, array_needsize_noinit); rfeeds [rfeedcnt++] = w; } inline_size void feed_reverse_done (EV_P_ int revents) { do ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents); while (rfeedcnt); } inline_speed void queue_events (EV_P_ W *events, int eventcnt, int type) { int i; for (i = 0; i < eventcnt; ++i) ev_feed_event (EV_A_ events [i], type); } /*****************************************************************************/ 
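/* Editor's note: a sketch (not part of libev) of injecting an artificial
 * event through ev_feed_event above; EV_CUSTOM is the event bit libev
 * reserves for user-generated events. The watcher name is illustrative. */
#if 0
/* queue a custom event on an active watcher; it is delivered on the
 * next invoke-pending pass: */
ev_feed_event (EV_A_ (void *)&my_watcher, EV_CUSTOM);
#endif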
inline_speed void fd_event_nocheck (EV_P_ int fd, int revents) { ANFD *anfd = anfds + fd; ev_io *w; for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next) { int ev = w->events & revents; if (ev) ev_feed_event (EV_A_ (W)w, ev); } } /* do not submit kernel events for fds that have reify set */ /* because that means they changed while we were polling for new events */ inline_speed void fd_event (EV_P_ int fd, int revents) { ANFD *anfd = anfds + fd; if (ecb_expect_true (!anfd->reify)) fd_event_nocheck (EV_A_ fd, revents); } void ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT { if (fd >= 0 && fd < anfdmax) fd_event_nocheck (EV_A_ fd, revents); } /* make sure the external fd watch events are in-sync */ /* with the kernel/libev internal state */ inline_size void fd_reify (EV_P) { int i; /* most backends do not modify the fdchanges list in backend_modfiy. * except io_uring, which has fixed-size buffers which might force us * to handle events in backend_modify, causing fdchanges to be amended, * which could result in an endless loop. * to avoid this, we do not dynamically handle fds that were added * during fd_reify. that means that for those backends, fdchangecnt * might be non-zero during poll, which must cause them to not block. * to not put too much of a burden on other backends, this detail * needs to be handled in the backend. */ int changecnt = fdchangecnt; #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP for (i = 0; i < changecnt; ++i) { int fd = fdchanges [i]; ANFD *anfd = anfds + fd; if (anfd->reify & EV__IOFDSET && anfd->head) { SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd); if (handle != anfd->handle) { unsigned long arg; assert (("libev: only socket fds supported in this configuration", ioctlsocket (handle, FIONREAD, &arg) == 0)); /* handle changed, but fd didn't - we need to do it in two steps */ backend_modify (EV_A_ fd, anfd->events, 0); anfd->events = 0; anfd->handle = handle; } } } #endif for (i = 0; i < changecnt; ++i) { int fd = fdchanges [i]; ANFD *anfd = anfds + fd; ev_io *w; unsigned char o_events = anfd->events; unsigned char o_reify = anfd->reify; anfd->reify = 0; /*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */ { anfd->events = 0; for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next) anfd->events |= (unsigned char)w->events; if (o_events != anfd->events) o_reify = EV__IOFDSET; /* actually |= */ } if (o_reify & EV__IOFDSET) backend_modify (EV_A_ fd, o_events, anfd->events); } /* normally, fdchangecnt hasn't changed. if it has, then new fds have been added. * this is a rare case (see beginning comment in this function), so we copy them to the * front and hope the backend handles this case. 
*/ if (ecb_expect_false (fdchangecnt != changecnt)) memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges)); fdchangecnt -= changecnt; } /* something about the given fd changed */ inline_size void fd_change (EV_P_ int fd, int flags) { unsigned char reify = anfds [fd].reify; anfds [fd].reify = reify | flags; if (ecb_expect_true (!reify)) { ++fdchangecnt; array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit); fdchanges [fdchangecnt - 1] = fd; } } /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */ inline_speed ecb_cold void fd_kill (EV_P_ int fd) { ev_io *w; while ((w = (ev_io *)anfds [fd].head)) { ev_io_stop (EV_A_ w); ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); } } /* check whether the given fd is actually valid, for error recovery */ inline_size ecb_cold int fd_valid (int fd) { #ifdef _WIN32 return EV_FD_TO_WIN32_HANDLE (fd) != -1; #else return fcntl (fd, F_GETFD) != -1; #endif } /* called on EBADF to verify fds */ ecb_noinline ecb_cold static void fd_ebadf (EV_P) { int fd; for (fd = 0; fd < anfdmax; ++fd) if (anfds [fd].events) if (!fd_valid (fd) && errno == EBADF) fd_kill (EV_A_ fd); } /* called on ENOMEM in select/poll to kill some fds and retry */ ecb_noinline ecb_cold static void fd_enomem (EV_P) { int fd; for (fd = anfdmax; fd--; ) if (anfds [fd].events) { fd_kill (EV_A_ fd); break; } } /* usually called after fork if backend needs to re-arm all fds from scratch */ ecb_noinline static void fd_rearm_all (EV_P) { int fd; for (fd = 0; fd < anfdmax; ++fd) if (anfds [fd].events) { anfds [fd].events = 0; anfds [fd].emask = 0; fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY); } } /* used to prepare libev internal fd's */ /* this is not fork-safe */ inline_speed void fd_intern (int fd) { #ifdef _WIN32 unsigned long arg = 1; ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg); #else fcntl (fd, F_SETFD, FD_CLOEXEC); fcntl (fd, F_SETFL, O_NONBLOCK); #endif } /*****************************************************************************/ /* * the heap functions want a real array index. array index 0 is guaranteed to not * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives * the branching factor of the d-tree. */ /* * at the moment we allow libev the luxury of two heaps, * a small-code-size 2-heap one and a ~1.5kb larger 4-heap * which is more cache-efficient. * the difference is about 5% with 50000+ watchers. 
*/ #if EV_USE_4HEAP #define DHEAP 4 #define HEAP0 (DHEAP - 1) /* index of first element in heap */ #define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0) #define UPHEAP_DONE(p,k) ((p) == (k)) /* away from the root */ inline_speed void downheap (ANHE *heap, int N, int k) { ANHE he = heap [k]; ANHE *E = heap + N + HEAP0; for (;;) { ev_tstamp minat; ANHE *minpos; ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1; /* find minimum child */ if (ecb_expect_true (pos + DHEAP - 1 < E)) { /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos)); if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos)); if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos)); } else if (pos < E) { /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos)); if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos)); if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos)); } else break; if (ANHE_at (he) <= minat) break; heap [k] = *minpos; ev_active (ANHE_w (*minpos)) = k; k = minpos - heap; } heap [k] = he; ev_active (ANHE_w (he)) = k; } #else /* not 4HEAP */ #define HEAP0 1 #define HPARENT(k) ((k) >> 1) #define UPHEAP_DONE(p,k) (!(p)) /* away from the root */ inline_speed void downheap (ANHE *heap, int N, int k) { ANHE he = heap [k]; for (;;) { int c = k << 1; if (c >= N + HEAP0) break; c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1]) ? 1 : 0; if (ANHE_at (he) <= ANHE_at (heap [c])) break; heap [k] = heap [c]; ev_active (ANHE_w (heap [k])) = k; k = c; } heap [k] = he; ev_active (ANHE_w (he)) = k; } #endif /* towards the root */ inline_speed void upheap (ANHE *heap, int k) { ANHE he = heap [k]; for (;;) { int p = HPARENT (k); if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he)) break; heap [k] = heap [p]; ev_active (ANHE_w (heap [k])) = k; k = p; } heap [k] = he; ev_active (ANHE_w (he)) = k; } /* move an element suitably so it is in a correct place */ inline_size void adjustheap (ANHE *heap, int N, int k) { if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)])) upheap (heap, k); else downheap (heap, N, k); } /* rebuild the heap: this function is used only once and executed rarely */ inline_size void reheap (ANHE *heap, int N) { int i; /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */ /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */ for (i = 0; i < N; ++i) upheap (heap, i + HEAP0); } /*****************************************************************************/ /* associate signal watchers to a signal */ typedef struct { EV_ATOMIC_T pending; #if EV_MULTIPLICITY EV_P; #endif WL head; } ANSIG; static ANSIG signals [EV_NSIG - 1]; /*****************************************************************************/ #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE ecb_noinline ecb_cold static void evpipe_init (EV_P) { if (!ev_is_active (&pipe_w)) { int fds [2]; # if EV_USE_EVENTFD fds [0] = -1; fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC); if (fds [1] < 0 && errno == EINVAL) fds [1] = eventfd (0, 0); if (fds [1] < 0) # endif { while (pipe (fds)) ev_syserr ("(libev) error creating signal/async pipe"); fd_intern (fds [0]); } evpipe [0] = fds [0]; if (evpipe [1] < 0) evpipe [1] = fds [1]; /* first call, set write fd */ else { 
/* on subsequent calls, do not change evpipe [1] */ /* so that evpipe_write can always rely on its value. */ /* this branch does not do anything sensible on windows, */ /* so must not be executed on windows */ dup2 (fds [1], evpipe [1]); close (fds [1]); } fd_intern (evpipe [1]); ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ); ev_io_start (EV_A_ &pipe_w); ev_unref (EV_A); /* watcher should not keep loop alive */ } } inline_speed void evpipe_write (EV_P_ EV_ATOMIC_T *flag) { ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */ if (ecb_expect_true (*flag)) return; *flag = 1; ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ pipe_write_skipped = 1; ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */ if (pipe_write_wanted) { int old_errno; pipe_write_skipped = 0; ECB_MEMORY_FENCE_RELEASE; old_errno = errno; /* save errno because write will clobber it */ #if EV_USE_EVENTFD if (evpipe [0] < 0) { uint64_t counter = 1; write (evpipe [1], &counter, sizeof (uint64_t)); } else #endif { #ifdef _WIN32 WSABUF buf; DWORD sent; buf.buf = (char *)&buf; buf.len = 1; WSASend (EV_FD_TO_WIN32_HANDLE (evpipe [1]), &buf, 1, &sent, 0, 0, 0); #else write (evpipe [1], &(evpipe [1]), 1); #endif } errno = old_errno; } } /* called whenever the libev signal pipe */ /* got some events (signal, async) */ static void pipecb (EV_P_ ev_io *iow, int revents) { int i; if (revents & EV_READ) { #if EV_USE_EVENTFD if (evpipe [0] < 0) { uint64_t counter; read (evpipe [1], &counter, sizeof (uint64_t)); } else #endif { char dummy[4]; #ifdef _WIN32 WSABUF buf; DWORD recvd; DWORD flags = 0; buf.buf = dummy; buf.len = sizeof (dummy); WSARecv (EV_FD_TO_WIN32_HANDLE (evpipe [0]), &buf, 1, &recvd, &flags, 0, 0); #else read (evpipe [0], &dummy, sizeof (dummy)); #endif } } pipe_write_skipped = 0; ECB_MEMORY_FENCE; /* push out skipped, acquire flags */ #if EV_SIGNAL_ENABLE if (sig_pending) { sig_pending = 0; ECB_MEMORY_FENCE; for (i = EV_NSIG - 1; i--; ) if (ecb_expect_false (signals [i].pending)) ev_feed_signal_event (EV_A_ i + 1); } #endif #if EV_ASYNC_ENABLE if (async_pending) { async_pending = 0; ECB_MEMORY_FENCE; for (i = asynccnt; i--; ) if (asyncs [i]->sent) { asyncs [i]->sent = 0; ECB_MEMORY_FENCE_RELEASE; ev_feed_event (EV_A_ asyncs [i], EV_ASYNC); } } #endif } /*****************************************************************************/ void ev_feed_signal (int signum) EV_NOEXCEPT { #if EV_MULTIPLICITY EV_P; ECB_MEMORY_FENCE_ACQUIRE; EV_A = signals [signum - 1].loop; if (!EV_A) return; #endif signals [signum - 1].pending = 1; evpipe_write (EV_A_ &sig_pending); } static void ev_sighandler (int signum) { #ifdef _WIN32 signal (signum, ev_sighandler); #endif ev_feed_signal (signum); } ecb_noinline void ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT { WL w; if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG)) return; --signum; #if EV_MULTIPLICITY /* it is permissible to try to feed a signal to the wrong loop */ /* or, likely more useful, feeding a signal nobody is waiting for */ if (ecb_expect_false (signals [signum].loop != EV_A)) return; #endif signals [signum].pending = 0; ECB_MEMORY_FENCE_RELEASE; for (w = signals [signum].head; w; w = w->next) ev_feed_event (EV_A_ (W)w, EV_SIGNAL); } #if EV_USE_SIGNALFD static void sigfdcb (EV_P_ ev_io *iow, int revents) { struct signalfd_siginfo si[2], *sip; /* these structs are big */ for (;;) { ssize_t res = read (sigfd, si, sizeof (si)); /* not ISO-C, 
as res might be -1, but works with SuS */ for (sip = si; (char *)sip < (char *)si + res; ++sip) ev_feed_signal_event (EV_A_ sip->ssi_signo); if (res < (ssize_t)sizeof (si)) break; } } #endif #endif /*****************************************************************************/ #if EV_CHILD_ENABLE static WL childs [EV_PID_HASHSIZE]; static ev_signal childev; #ifndef WIFCONTINUED # define WIFCONTINUED(status) 0 #endif /* handle a single child status event */ inline_speed void child_reap (EV_P_ int chain, int pid, int status) { ev_child *w; int traced = WIFSTOPPED (status) || WIFCONTINUED (status); for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next) { if ((w->pid == pid || !w->pid) && (!traced || (w->flags & 1))) { ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */ w->rpid = pid; w->rstatus = status; ev_feed_event (EV_A_ (W)w, EV_CHILD); } } } #ifndef WCONTINUED # define WCONTINUED 0 #endif /* called on sigchld etc., calls waitpid */ static void childcb (EV_P_ ev_signal *sw, int revents) { int pid, status; /* some systems define WCONTINUED but then fail to support it (linux 2.4) */ if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) if (!WCONTINUED || errno != EINVAL || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED))) return; /* make sure we are called again until all children have been reaped */ /* we need to do it this way so that the callback gets called before we continue */ ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); child_reap (EV_A_ pid, pid, status); if ((EV_PID_HASHSIZE) > 1) child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */ } #endif /*****************************************************************************/ #if EV_USE_TIMERFD static void periodics_reschedule (EV_P); static void timerfdcb (EV_P_ ev_io *iow, int revents) { struct itimerspec its = { 0 }; its.it_value.tv_sec = ev_rt_now + (int)MAX_BLOCKTIME2; timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0); ev_rt_now = ev_time (); /* periodics_reschedule only needs ev_rt_now */ /* but maybe in the future we want the full treatment. 
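 * (the timerfd is armed at an absolute time MAX_BLOCKTIME2 ahead with
 * TFD_TIMER_CANCEL_ON_SET, so it only becomes readable when it expires or
 * when somebody sets the realtime clock - a cheap time-jump detector.)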
*/ /* now_floor = EV_TS_CONST (0.); time_update (EV_A_ EV_TSTAMP_HUGE); */ #if EV_PERIODIC_ENABLE periodics_reschedule (EV_A); #endif } ecb_noinline ecb_cold static void evtimerfd_init (EV_P) { if (!ev_is_active (&timerfd_w)) { timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC); if (timerfd >= 0) { fd_intern (timerfd); /* just to be sure */ ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ); ev_set_priority (&timerfd_w, EV_MINPRI); ev_io_start (EV_A_ &timerfd_w); ev_unref (EV_A); /* watcher should not keep loop alive */ /* (re-) arm timer */ timerfdcb (EV_A_ 0, 0); } } } #endif /*****************************************************************************/ #if EV_USE_IOCP # include "ev_iocp.c" #endif #if EV_USE_PORT # include "ev_port.c" #endif #if EV_USE_KQUEUE # include "ev_kqueue.c" #endif #if EV_USE_EPOLL # include "ev_epoll.c" #endif #if EV_USE_LINUXAIO # include "ev_linuxaio.c" #endif #if EV_USE_IOURING # include "ev_iouring.c" #endif #if EV_USE_POLL # include "ev_poll.c" #endif #if EV_USE_SELECT # include "ev_select.c" #endif ecb_cold int ev_version_major (void) EV_NOEXCEPT { return EV_VERSION_MAJOR; } ecb_cold int ev_version_minor (void) EV_NOEXCEPT { return EV_VERSION_MINOR; } /* return true if we are running with elevated privileges and should ignore env variables */ inline_size ecb_cold int enable_secure (void) { #ifdef _WIN32 return 0; #else return getuid () != geteuid () || getgid () != getegid (); #endif } ecb_cold unsigned int ev_supported_backends (void) EV_NOEXCEPT { unsigned int flags = 0; if (EV_USE_PORT ) flags |= EVBACKEND_PORT; if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; if (EV_USE_LINUXAIO && ev_linux_version () >= 0x041300) flags |= EVBACKEND_LINUXAIO; /* 4.19+ */ if (EV_USE_IOURING && ev_linux_version () >= 0x050601 ) flags |= EVBACKEND_IOURING; /* 5.6.1+ */ if (EV_USE_POLL ) flags |= EVBACKEND_POLL; if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; return flags; } ecb_cold unsigned int ev_recommended_backends (void) EV_NOEXCEPT { unsigned int flags = ev_supported_backends (); /* apple has a poor track record but post 10.12.2 it seems to work sufficiently well */ #if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_14) /* only select works correctly on that "unix-certified" platform */ flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */ flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */ #endif #if !defined(__NetBSD__) && !defined(__APPLE__) /* kqueue is borked on everything but netbsd and osx >= 10.12.2 apparently */ /* it usually doesn't work correctly on anything but sockets and pipes */ flags &= ~EVBACKEND_KQUEUE; #endif #ifdef __FreeBSD__ flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */ #endif #ifdef __linux__ /* NOTE: linuxaio is very experimental, never recommend */ flags &= ~EVBACKEND_LINUXAIO; /* NOTE: io_uring is super experimental, never recommend */ flags &= ~EVBACKEND_IOURING; #endif return flags; } ecb_cold unsigned int ev_embeddable_backends (void) EV_NOEXCEPT { int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING; /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ flags &= ~EVBACKEND_EPOLL; /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */ return flags; } unsigned int ev_backend 
(EV_P) EV_NOEXCEPT { return backend; } #if EV_FEATURE_API unsigned int ev_iteration (EV_P) EV_NOEXCEPT { return loop_count; } unsigned int ev_depth (EV_P) EV_NOEXCEPT { return loop_depth; } void ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT { io_blocktime = interval; } void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT { timeout_blocktime = interval; } void ev_set_userdata (EV_P_ void *data) EV_NOEXCEPT { userdata = data; } void * ev_userdata (EV_P) EV_NOEXCEPT { return userdata; } void ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_NOEXCEPT { invoke_cb = invoke_pending_cb; } void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)(EV_P) EV_NOEXCEPT) EV_NOEXCEPT { release_cb = release; acquire_cb = acquire; } #endif /* initialise a loop structure, must be zero-initialised */ ecb_noinline ecb_cold static void loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT { if (!backend) { origflags = flags; #if EV_USE_REALTIME if (!have_realtime) { struct timespec ts; if (!clock_gettime (CLOCK_REALTIME, &ts)) have_realtime = 1; } #endif #if EV_USE_MONOTONIC if (!have_monotonic) { struct timespec ts; if (!clock_gettime (CLOCK_MONOTONIC, &ts)) have_monotonic = 1; } #endif /* pid check not overridable via env */ #ifndef _WIN32 if (flags & EVFLAG_FORKCHECK) curpid = getpid (); #endif if (!(flags & EVFLAG_NOENV) && !enable_secure () && getenv ("LIBEV_FLAGS")) flags = atoi (getenv ("LIBEV_FLAGS")); ev_rt_now = ev_time (); mn_now = get_clock (); now_floor = mn_now; rtmn_diff = ev_rt_now - mn_now; #if EV_FEATURE_API invoke_cb = ev_invoke_pending; #endif io_blocktime = 0.; timeout_blocktime = 0.; backend = 0; backend_fd = -1; sig_pending = 0; #if EV_ASYNC_ENABLE async_pending = 0; #endif pipe_write_skipped = 0; pipe_write_wanted = 0; evpipe [0] = -1; evpipe [1] = -1; #if EV_USE_INOTIFY fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; #endif #if EV_USE_SIGNALFD sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; #endif #if EV_USE_TIMERFD timerfd = flags & EVFLAG_NOTIMERFD ? 
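/* -1 keeps the timerfd machinery disabled, -2 means "create on first use" - see evtimerfd_init () and ev_periodic_start () */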
-1 : -2; #endif if (!(flags & EVBACKEND_MASK)) flags |= ev_recommended_backends (); #if EV_USE_IOCP if (!backend && (flags & EVBACKEND_IOCP )) backend = iocp_init (EV_A_ flags); #endif #if EV_USE_PORT if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); #endif #if EV_USE_KQUEUE if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags); #endif #if EV_USE_IOURING if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init (EV_A_ flags); #endif #if EV_USE_LINUXAIO if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags); #endif #if EV_USE_EPOLL if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags); #endif #if EV_USE_POLL if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags); #endif #if EV_USE_SELECT if (!backend && (flags & EVBACKEND_SELECT )) backend = select_init (EV_A_ flags); #endif ev_prepare_init (&pending_w, pendingcb); #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE ev_init (&pipe_w, pipecb); ev_set_priority (&pipe_w, EV_MAXPRI); #endif } } /* free up a loop structure */ ecb_cold void ev_loop_destroy (EV_P) { int i; #if EV_MULTIPLICITY /* mimic free (0) */ if (!EV_A) return; #endif #if EV_CLEANUP_ENABLE /* queue cleanup watchers (and execute them) */ if (ecb_expect_false (cleanupcnt)) { queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP); EV_INVOKE_PENDING; } #endif #if EV_CHILD_ENABLE if (ev_is_default_loop (EV_A) && ev_is_active (&childev)) { ev_ref (EV_A); /* child watcher */ ev_signal_stop (EV_A_ &childev); } #endif if (ev_is_active (&pipe_w)) { /*ev_ref (EV_A);*/ /*ev_io_stop (EV_A_ &pipe_w);*/ if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]); if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]); } #if EV_USE_SIGNALFD if (ev_is_active (&sigfd_w)) close (sigfd); #endif #if EV_USE_TIMERFD if (ev_is_active (&timerfd_w)) close (timerfd); #endif #if EV_USE_INOTIFY if (fs_fd >= 0) close (fs_fd); #endif if (backend_fd >= 0) close (backend_fd); #if EV_USE_IOCP if (backend == EVBACKEND_IOCP ) iocp_destroy (EV_A); #endif #if EV_USE_PORT if (backend == EVBACKEND_PORT ) port_destroy (EV_A); #endif #if EV_USE_KQUEUE if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A); #endif #if EV_USE_IOURING if (backend == EVBACKEND_IOURING ) iouring_destroy (EV_A); #endif #if EV_USE_LINUXAIO if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A); #endif #if EV_USE_EPOLL if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A); #endif #if EV_USE_POLL if (backend == EVBACKEND_POLL ) poll_destroy (EV_A); #endif #if EV_USE_SELECT if (backend == EVBACKEND_SELECT ) select_destroy (EV_A); #endif for (i = NUMPRI; i--; ) { array_free (pending, [i]); #if EV_IDLE_ENABLE array_free (idle, [i]); #endif } ev_free (anfds); anfds = 0; anfdmax = 0; /* have to use the microsoft-never-gets-it-right macro */ array_free (rfeed, EMPTY); array_free (fdchange, EMPTY); array_free (timer, EMPTY); #if EV_PERIODIC_ENABLE array_free (periodic, EMPTY); #endif #if EV_FORK_ENABLE array_free (fork, EMPTY); #endif #if EV_CLEANUP_ENABLE array_free (cleanup, EMPTY); #endif array_free (prepare, EMPTY); array_free (check, EMPTY); #if EV_ASYNC_ENABLE array_free (async, EMPTY); #endif backend = 0; #if EV_MULTIPLICITY if (ev_is_default_loop (EV_A)) #endif ev_default_loop_ptr = 0; #if EV_MULTIPLICITY else ev_free (EV_A); #endif } #if EV_USE_INOTIFY inline_size void infy_fork (EV_P); #endif inline_size void loop_fork (EV_P) { #if EV_USE_PORT if (backend == EVBACKEND_PORT ) port_fork (EV_A); #endif #if 
EV_USE_KQUEUE if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A); #endif #if EV_USE_IOURING if (backend == EVBACKEND_IOURING ) iouring_fork (EV_A); #endif #if EV_USE_LINUXAIO if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A); #endif #if EV_USE_EPOLL if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); #endif #if EV_USE_INOTIFY infy_fork (EV_A); #endif if (postfork != 2) { #if EV_USE_SIGNALFD /* surprisingly, nothing needs to be done for signalfd, according to docs, it does the right thing on fork */ #endif #if EV_USE_TIMERFD if (ev_is_active (&timerfd_w)) { ev_ref (EV_A); ev_io_stop (EV_A_ &timerfd_w); close (timerfd); timerfd = -2; evtimerfd_init (EV_A); /* reschedule periodics, in case we missed something */ ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM); } #endif #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE if (ev_is_active (&pipe_w)) { /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ ev_ref (EV_A); ev_io_stop (EV_A_ &pipe_w); if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]); evpipe_init (EV_A); /* iterate over everything, in case we missed something before */ ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); } #endif } postfork = 0; } #if EV_MULTIPLICITY ecb_cold struct ev_loop * ev_loop_new (unsigned int flags) EV_NOEXCEPT { EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop)); memset (EV_A, 0, sizeof (struct ev_loop)); loop_init (EV_A_ flags); if (ev_backend (EV_A)) return EV_A; ev_free (EV_A); return 0; } #endif /* multiplicity */ #if EV_VERIFY ecb_noinline ecb_cold static void verify_watcher (EV_P_ W w) { assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI)); if (w->pending) assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w)); } ecb_noinline ecb_cold static void verify_heap (EV_P_ ANHE *heap, int N) { int i; for (i = HEAP0; i < N + HEAP0; ++i) { assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i)); assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i]))); assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i])))); verify_watcher (EV_A_ (W)ANHE_w (heap [i])); } } ecb_noinline ecb_cold static void array_verify (EV_P_ W *ws, int cnt) { while (cnt--) { assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1)); verify_watcher (EV_A_ ws [cnt]); } } #endif #if EV_FEATURE_API void ecb_cold ev_verify (EV_P) EV_NOEXCEPT { #if EV_VERIFY int i; WL w, w2; assert (activecnt >= -1); assert (fdchangemax >= fdchangecnt); for (i = 0; i < fdchangecnt; ++i) assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0)); assert (anfdmax >= 0); for (i = 0; i < anfdmax; ++i) { int j = 0; for (w = w2 = anfds [i].head; w; w = w->next) { verify_watcher (EV_A_ (W)w); if (j++ & 1) { assert (("libev: io watcher list contains a loop", w != w2)); w2 = w2->next; } assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1)); assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i)); } } assert (timermax >= timercnt); verify_heap (EV_A_ timers, timercnt); #if EV_PERIODIC_ENABLE assert (periodicmax >= periodiccnt); verify_heap (EV_A_ periodics, periodiccnt); #endif for (i = NUMPRI; i--; ) { assert (pendingmax [i] >= pendingcnt [i]); #if EV_IDLE_ENABLE assert (idleall >= 0); assert (idlemax [i] >= idlecnt [i]); array_verify (EV_A_ (W *)idles [i], idlecnt [i]); #endif } #if EV_FORK_ENABLE assert (forkmax >= forkcnt); array_verify
(EV_A_ (W *)forks, forkcnt); #endif #if EV_CLEANUP_ENABLE assert (cleanupmax >= cleanupcnt); array_verify (EV_A_ (W *)cleanups, cleanupcnt); #endif #if EV_ASYNC_ENABLE assert (asyncmax >= asynccnt); array_verify (EV_A_ (W *)asyncs, asynccnt); #endif #if EV_PREPARE_ENABLE assert (preparemax >= preparecnt); array_verify (EV_A_ (W *)prepares, preparecnt); #endif #if EV_CHECK_ENABLE assert (checkmax >= checkcnt); array_verify (EV_A_ (W *)checks, checkcnt); #endif # if 0 #if EV_CHILD_ENABLE for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next) for (signum = EV_NSIG; signum--; ) if (signals [signum].pending) #endif # endif #endif } #endif #if EV_MULTIPLICITY ecb_cold struct ev_loop * #else int #endif ev_default_loop (unsigned int flags) EV_NOEXCEPT { if (!ev_default_loop_ptr) { #if EV_MULTIPLICITY EV_P = ev_default_loop_ptr = &default_loop_struct; #else ev_default_loop_ptr = 1; #endif loop_init (EV_A_ flags); if (ev_backend (EV_A)) { #if EV_CHILD_ENABLE ev_signal_init (&childev, childcb, SIGCHLD); ev_set_priority (&childev, EV_MAXPRI); ev_signal_start (EV_A_ &childev); ev_unref (EV_A); /* child watcher should not keep loop alive */ #endif } else ev_default_loop_ptr = 0; } return ev_default_loop_ptr; } void ev_loop_fork (EV_P) EV_NOEXCEPT { postfork = 1; } /*****************************************************************************/ void ev_invoke (EV_P_ void *w, int revents) { EV_CB_INVOKE ((W)w, revents); } unsigned int ev_pending_count (EV_P) EV_NOEXCEPT { int pri; unsigned int count = 0; for (pri = NUMPRI; pri--; ) count += pendingcnt [pri]; return count; } ecb_noinline void ev_invoke_pending (EV_P) { pendingpri = NUMPRI; do { --pendingpri; /* pendingpri possibly gets modified in the inner loop */ while (pendingcnt [pendingpri]) { ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri]; p->w->pending = 0; EV_CB_INVOKE (p->w, p->events); EV_FREQUENT_CHECK; } } while (pendingpri); } #if EV_IDLE_ENABLE /* make idle watchers pending. this handles the "call-idle */ /* only when higher priorities are idle" logic */ inline_size void idle_reify (EV_P) { if (ecb_expect_false (idleall)) { int pri; for (pri = NUMPRI; pri--; ) { if (pendingcnt [pri]) break; if (idlecnt [pri]) { queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE); break; } } } } #endif /* make timers pending */ inline_size void timers_reify (EV_P) { EV_FREQUENT_CHECK; if (timercnt && ANHE_at (timers [HEAP0]) < mn_now) { do { ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]); /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/ /* first reschedule or stop timer */ if (w->repeat) { ev_at (w) += w->repeat; if (ev_at (w) < mn_now) ev_at (w) = mn_now; assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.))); ANHE_at_cache (timers [HEAP0]); downheap (timers, timercnt, HEAP0); } else ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */ EV_FREQUENT_CHECK; feed_reverse (EV_A_ (W)w); } while (timercnt && ANHE_at (timers [HEAP0]) < mn_now); feed_reverse_done (EV_A_ EV_TIMER); } } #if EV_PERIODIC_ENABLE ecb_noinline static void periodic_recalc (EV_P_ ev_periodic *w) { ev_tstamp interval = w->interval > MIN_INTERVAL ? 
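/* clamp tiny intervals to MIN_INTERVAL so the division and ev_floor () below stay numerically well-behaved */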
w->interval : MIN_INTERVAL; ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval); /* the above almost always errs on the low side */ while (at <= ev_rt_now) { ev_tstamp nat = at + w->interval; /* when resolution fails us, we use ev_rt_now */ if (ecb_expect_false (nat == at)) { at = ev_rt_now; break; } at = nat; } ev_at (w) = at; } /* make periodics pending */ inline_size void periodics_reify (EV_P) { EV_FREQUENT_CHECK; while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now) { do { ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]); /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/ /* first reschedule or stop timer */ if (w->reschedule_cb) { ev_at (w) = w->reschedule_cb (w, ev_rt_now); assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now)); ANHE_at_cache (periodics [HEAP0]); downheap (periodics, periodiccnt, HEAP0); } else if (w->interval) { periodic_recalc (EV_A_ w); ANHE_at_cache (periodics [HEAP0]); downheap (periodics, periodiccnt, HEAP0); } else ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */ EV_FREQUENT_CHECK; feed_reverse (EV_A_ (W)w); } while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now); feed_reverse_done (EV_A_ EV_PERIODIC); } } /* simply recalculate all periodics */ /* TODO: maybe ensure that at least one event happens when jumping forward? */ ecb_noinline ecb_cold static void periodics_reschedule (EV_P) { int i; /* adjust periodics after time jump */ for (i = HEAP0; i < periodiccnt + HEAP0; ++i) { ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]); if (w->reschedule_cb) ev_at (w) = w->reschedule_cb (w, ev_rt_now); else if (w->interval) periodic_recalc (EV_A_ w); ANHE_at_cache (periodics [i]); } reheap (periodics, periodiccnt); } #endif /* adjust all timers by a given offset */ ecb_noinline ecb_cold static void timers_reschedule (EV_P_ ev_tstamp adjust) { int i; for (i = 0; i < timercnt; ++i) { ANHE *he = timers + i + HEAP0; ANHE_w (*he)->at += adjust; ANHE_at_cache (*he); } } /* fetch new monotonic and realtime times from the kernel */ /* also detect if there was a timejump, and act accordingly */ inline_speed void time_update (EV_P_ ev_tstamp max_block) { #if EV_USE_MONOTONIC if (ecb_expect_true (have_monotonic)) { int i; ev_tstamp odiff = rtmn_diff; mn_now = get_clock (); /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ /* interpolate in the meantime */ if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5))) { ev_rt_now = rtmn_diff + mn_now; return; } now_floor = mn_now; ev_rt_now = ev_time (); /* loop a few times, before making important decisions. * on the choice of "4": one iteration isn't enough, * in case we get preempted during the calls to * ev_time and get_clock. a second call is almost guaranteed * to succeed in that case, though. and looping a few more times * doesn't hurt either as we only do this on time-jumps or * in the unlikely event of having been preempted here. */ for (i = 4; --i; ) { ev_tstamp diff; rtmn_diff = ev_rt_now - mn_now; diff = odiff - rtmn_diff; if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? 
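/* open-coded fabs: the clock pair is considered consistent while |odiff - rtmn_diff| stays below MIN_TIMEJUMP */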
-diff : diff) < EV_TS_CONST (MIN_TIMEJUMP))) return; /* all is well */ ev_rt_now = ev_time (); mn_now = get_clock (); now_floor = mn_now; } /* no timer adjustment, as the monotonic clock doesn't jump */ /* timers_reschedule (EV_A_ rtmn_diff - odiff) */ # if EV_PERIODIC_ENABLE periodics_reschedule (EV_A); # endif } else #endif { ev_rt_now = ev_time (); if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP))) { /* adjust timers. this is easy, as the offset is the same for all of them */ timers_reschedule (EV_A_ ev_rt_now - mn_now); #if EV_PERIODIC_ENABLE periodics_reschedule (EV_A); #endif } mn_now = ev_rt_now; } } /* ########## NIO4R PATCHERY HO! ########## */ struct ev_poll_args { struct ev_loop *loop; ev_tstamp waittime; }; static void * ev_backend_poll(void *ptr) { struct ev_poll_args *args = (struct ev_poll_args *)ptr; struct ev_loop *loop = args->loop; backend_poll (EV_A_ args->waittime); return NULL; } /* ######################################## */ int ev_run (EV_P_ int flags) { /* ########## NIO4R PATCHERY HO! ########## */ struct ev_poll_args poll_args; /* ######################################## */ #if EV_FEATURE_API ++loop_depth; #endif assert (("libev: ev_loop recursion during release detected", loop_done != EVBREAK_RECURSE)); loop_done = EVBREAK_CANCEL; EV_INVOKE_PENDING; /* in case we recurse, ensure ordering stays nice and clean */ do { #if EV_VERIFY >= 2 ev_verify (EV_A); #endif #ifndef _WIN32 if (ecb_expect_false (curpid)) /* penalise the forking check even more */ if (ecb_expect_false (getpid () != curpid)) { curpid = getpid (); postfork = 1; } #endif #if EV_FORK_ENABLE /* we might have forked, so queue fork handlers */ if (ecb_expect_false (postfork)) if (forkcnt) { queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); EV_INVOKE_PENDING; } #endif #if EV_PREPARE_ENABLE /* queue prepare watchers (and execute them) */ if (ecb_expect_false (preparecnt)) { queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); EV_INVOKE_PENDING; } #endif if (ecb_expect_false (loop_done)) break; /* we might have forked, so reify kernel state if necessary */ if (ecb_expect_false (postfork)) loop_fork (EV_A); /* update fd-related kernel structures */ fd_reify (EV_A); /* calculate blocking time */ { ev_tstamp waittime = 0.; ev_tstamp sleeptime = 0.; /* remember old timestamp for io_blocktime calculation */ ev_tstamp prev_mn_now = mn_now; /* update time to cancel out callback processing overhead */ time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE)); /* from now on, we want a pipe-wake-up */ pipe_write_wanted = 1; ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) { waittime = EV_TS_CONST (MAX_BLOCKTIME); #if EV_USE_TIMERFD /* sleep a lot longer when we can reliably detect timejumps */ if (ecb_expect_true (timerfd >= 0)) waittime = EV_TS_CONST (MAX_BLOCKTIME2); #endif #if !EV_PERIODIC_ENABLE /* without periodics but with monotonic clock there is no need */ /* for any time jump detection, so sleep longer */ if (ecb_expect_true (have_monotonic)) waittime = EV_TS_CONST (MAX_BLOCKTIME2); #endif if (timercnt) { ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; if (waittime > to) waittime = to; } #if EV_PERIODIC_ENABLE if (periodiccnt) { ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now; if (waittime > to) waittime = to; } #endif /* don't let timeouts decrease the waittime below timeout_blocktime */ if 
(ecb_expect_false (waittime < timeout_blocktime)) waittime = timeout_blocktime; /* now there are two more special cases left, either we have * already-expired timers, so we should not sleep, or we have timers * that expire very soon, in which case we need to wait for a minimum * amount of time for some event loop backends. */ if (ecb_expect_false (waittime < backend_mintime)) waittime = waittime <= EV_TS_CONST (0.) ? EV_TS_CONST (0.) : backend_mintime; /* extra check because io_blocktime is commonly 0 */ if (ecb_expect_false (io_blocktime)) { sleeptime = io_blocktime - (mn_now - prev_mn_now); if (sleeptime > waittime - backend_mintime) sleeptime = waittime - backend_mintime; if (ecb_expect_true (sleeptime > EV_TS_CONST (0.))) { ev_sleep (sleeptime); waittime -= sleeptime; } } } #if EV_FEATURE_API ++loop_count; #endif assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */ /* ########################## NIO4R PATCHERY HO! ########################## According to the grandwizards of Ruby, locking and unlocking of the global interpreter lock are apparently too powerful a concept for a mere mortal to wield (although redefining what + and - do to numbers is totally cool). And so it came to pass that the only acceptable way to release the global interpreter lock is through a convoluted callback system that takes a function pointer. While the grandwizard of libev foresaw this sort of scenario, he too attempted to place an API with callbacks on it, one that runs before the system call, and one that runs immediately after. And so it came to pass that trying to wrap everything up in callbacks created two incompatible APIs, Ruby's which releases the global interpreter lock and reacquires it when the callback returns, and libev's, which wants two callbacks, one which runs before the polling operation starts, and one which runs after it finishes. These two systems are incompatible as they both want to use callbacks to solve the same problem, however libev wants to use before/after callbacks, and Ruby wants to use an "around" callback. This presents a significant problem as these two patterns of callbacks are diametrical opposites of each other and thus cannot be composed. And thus we are left with no choice but to patch the internals of libev in order to release a mutex at just the precise moment. This is a great example of a situation where granular locking and unlocking of the GVL is practically required. The goal is to get as close to the system call as possible, and to keep the GVL unlocked for the shortest amount of time possible.
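     In concrete terms, the hand-off below marshals the loop pointer and the
     computed waittime into a struct and lets Ruby run the backend poll with
     the GVL released for just the duration of the system call:

         struct ev_poll_args args = { loop, waittime };
         rb_thread_call_without_gvl(ev_backend_poll, (void *)&args, RUBY_UBF_IO, 0);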
Perhaps Ruby could benefit from such an API, e.g: rb_thread_unsafe_dangerous_crazy_blocking_region_begin(...); rb_thread_unsafe_dangerous_crazy_blocking_region_end(...); ####################################################################### */ poll_args.loop = loop; poll_args.waittime = waittime; rb_thread_call_without_gvl(ev_backend_poll, (void *)&poll_args, RUBY_UBF_IO, 0); /* ############################# END PATCHERY ############################ */ assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ pipe_write_wanted = 0; /* just an optimisation, no fence needed */ ECB_MEMORY_FENCE_ACQUIRE; if (pipe_write_skipped) { assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); } /* update ev_rt_now, do magic */ time_update (EV_A_ waittime + sleeptime); } /* queue pending timers and reschedule them */ timers_reify (EV_A); /* relative timers called last */ #if EV_PERIODIC_ENABLE periodics_reify (EV_A); /* absolute timers called first */ #endif #if EV_IDLE_ENABLE /* queue idle watchers unless other events are pending */ idle_reify (EV_A); #endif #if EV_CHECK_ENABLE /* queue check watchers, to be executed first */ if (ecb_expect_false (checkcnt)) queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); #endif EV_INVOKE_PENDING; } while (ecb_expect_true ( activecnt && !loop_done && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT)) )); if (loop_done == EVBREAK_ONE) loop_done = EVBREAK_CANCEL; #if EV_FEATURE_API --loop_depth; #endif return activecnt; } void ev_break (EV_P_ int how) EV_NOEXCEPT { loop_done = how; } void ev_ref (EV_P) EV_NOEXCEPT { ++activecnt; } void ev_unref (EV_P) EV_NOEXCEPT { --activecnt; } void ev_now_update (EV_P) EV_NOEXCEPT { time_update (EV_A_ EV_TSTAMP_HUGE); } void ev_suspend (EV_P) EV_NOEXCEPT { ev_now_update (EV_A); } void ev_resume (EV_P) EV_NOEXCEPT { ev_tstamp mn_prev = mn_now; ev_now_update (EV_A); timers_reschedule (EV_A_ mn_now - mn_prev); #if EV_PERIODIC_ENABLE /* TODO: really do this? */ periodics_reschedule (EV_A); #endif } /*****************************************************************************/ /* singly-linked list management, used when the expected list length is short */ inline_size void wlist_add (WL *head, WL elem) { elem->next = *head; *head = elem; } inline_size void wlist_del (WL *head, WL elem) { while (*head) { if (ecb_expect_true (*head == elem)) { *head = elem->next; break; } head = &(*head)->next; } } /* internal, faster, version of ev_clear_pending */ inline_speed void clear_pending (EV_P_ W w) { if (w->pending) { pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w; w->pending = 0; } } int ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT { W w_ = (W)w; int pending = w_->pending; if (ecb_expect_true (pending)) { ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1; p->w = (W)&pending_w; w_->pending = 0; return p->events; } else return 0; } inline_size void pri_adjust (EV_P_ W w) { int pri = ev_priority (w); pri = pri < EV_MINPRI ? EV_MINPRI : pri; pri = pri > EV_MAXPRI ? 
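/* and clamp from above, so the effective priority always lands inside [EV_MINPRI, EV_MAXPRI] */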
EV_MAXPRI : pri; ev_set_priority (w, pri); } inline_speed void ev_start (EV_P_ W w, int active) { pri_adjust (EV_A_ w); w->active = active; ev_ref (EV_A); } inline_size void ev_stop (EV_P_ W w) { ev_unref (EV_A); w->active = 0; } /*****************************************************************************/ ecb_noinline void ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT { int fd = w->fd; if (ecb_expect_false (ev_is_active (w))) return; assert (("libev: ev_io_start called with negative fd", fd >= 0)); assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE)))); #if EV_VERIFY >= 2 assert (("libev: ev_io_start called on watcher with invalid fd", fd_valid (fd))); #endif EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, 1); array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill); wlist_add (&anfds[fd].head, (WL)w); /* common bug, apparently */ assert (("libev: ev_io_start called with corrupted watcher", ((WL)w)->next != (WL)w)); fd_change (EV_A_ fd, w->events & EV__IOFDSET | EV_ANFD_REIFY); w->events &= ~EV__IOFDSET; EV_FREQUENT_CHECK; } ecb_noinline void ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); #if EV_VERIFY >= 2 assert (("libev: ev_io_stop called on watcher with invalid fd", fd_valid (w->fd))); #endif EV_FREQUENT_CHECK; wlist_del (&anfds[w->fd].head, (WL)w); ev_stop (EV_A_ (W)w); fd_change (EV_A_ w->fd, EV_ANFD_REIFY); EV_FREQUENT_CHECK; } ecb_noinline void ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; ev_at (w) += mn_now; assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); EV_FREQUENT_CHECK; ++timercnt; ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1); array_needsize (ANHE, timers, timermax, ev_active (w) + 1, array_needsize_noinit); ANHE_w (timers [ev_active (w)]) = (WT)w; ANHE_at_cache (timers [ev_active (w)]); upheap (timers, ev_active (w)); EV_FREQUENT_CHECK; /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/ } ecb_noinline void ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w)); --timercnt; if (ecb_expect_true (active < timercnt + HEAP0)) { timers [active] = timers [timercnt + HEAP0]; adjustheap (timers, timercnt, active); } } ev_at (w) -= mn_now; ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } ecb_noinline void ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT { EV_FREQUENT_CHECK; clear_pending (EV_A_ (W)w); if (ev_is_active (w)) { if (w->repeat) { ev_at (w) = mn_now + w->repeat; ANHE_at_cache (timers [ev_active (w)]); adjustheap (timers, timercnt, ev_active (w)); } else ev_timer_stop (EV_A_ w); } else if (w->repeat) { ev_at (w) = w->repeat; ev_timer_start (EV_A_ w); } EV_FREQUENT_CHECK; } ev_tstamp ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT { return ev_at (w) - (ev_is_active (w) ? 
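/* for an active timer ev_at is absolute, so subtract mn_now; a stopped timer already had mn_now subtracted in ev_timer_stop, so ev_at is the remainder itself */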
mn_now : EV_TS_CONST (0.)); } #if EV_PERIODIC_ENABLE ecb_noinline void ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; #if EV_USE_TIMERFD if (timerfd == -2) evtimerfd_init (EV_A); #endif if (w->reschedule_cb) ev_at (w) = w->reschedule_cb (w, ev_rt_now); else if (w->interval) { assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.)); periodic_recalc (EV_A_ w); } else ev_at (w) = w->offset; EV_FREQUENT_CHECK; ++periodiccnt; ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1); array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, array_needsize_noinit); ANHE_w (periodics [ev_active (w)]) = (WT)w; ANHE_at_cache (periodics [ev_active (w)]); upheap (periodics, ev_active (w)); EV_FREQUENT_CHECK; /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/ } ecb_noinline void ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w)); --periodiccnt; if (ecb_expect_true (active < periodiccnt + HEAP0)) { periodics [active] = periodics [periodiccnt + HEAP0]; adjustheap (periodics, periodiccnt, active); } } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } ecb_noinline void ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT { /* TODO: use adjustheap and recalculation */ ev_periodic_stop (EV_A_ w); ev_periodic_start (EV_A_ w); } #endif #ifndef SA_RESTART # define SA_RESTART 0 #endif #if EV_SIGNAL_ENABLE ecb_noinline void ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG)); #if EV_MULTIPLICITY assert (("libev: a signal must not be attached to two different loops", !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop)); signals [w->signum - 1].loop = EV_A; ECB_MEMORY_FENCE_RELEASE; #endif EV_FREQUENT_CHECK; #if EV_USE_SIGNALFD if (sigfd == -2) { sigfd = signalfd (-1, &sigfd_set, SFD_NONBLOCK | SFD_CLOEXEC); if (sigfd < 0 && errno == EINVAL) sigfd = signalfd (-1, &sigfd_set, 0); /* retry without flags */ if (sigfd >= 0) { fd_intern (sigfd); /* doing it twice will not hurt */ sigemptyset (&sigfd_set); ev_io_init (&sigfd_w, sigfdcb, sigfd, EV_READ); ev_set_priority (&sigfd_w, EV_MAXPRI); ev_io_start (EV_A_ &sigfd_w); ev_unref (EV_A); /* signalfd watcher should not keep loop alive */ } } if (sigfd >= 0) { /* TODO: check .head */ sigaddset (&sigfd_set, w->signum); sigprocmask (SIG_BLOCK, &sigfd_set, 0); signalfd (sigfd, &sigfd_set, 0); } #endif ev_start (EV_A_ (W)w, 1); wlist_add (&signals [w->signum - 1].head, (WL)w); if (!((WL)w)->next) # if EV_USE_SIGNALFD if (sigfd < 0) /*TODO*/ # endif { # ifdef _WIN32 evpipe_init (EV_A); signal (w->signum, ev_sighandler); # else struct sigaction sa; evpipe_init (EV_A); sa.sa_handler = ev_sighandler; sigfillset (&sa.sa_mask); sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */ sigaction (w->signum, &sa, 0); if (origflags & EVFLAG_NOSIGMASK) { sigemptyset (&sa.sa_mask); sigaddset (&sa.sa_mask, w->signum); sigprocmask (SIG_UNBLOCK, &sa.sa_mask, 0); } #endif } EV_FREQUENT_CHECK; } ecb_noinline void ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; 
EV_FREQUENT_CHECK; wlist_del (&signals [w->signum - 1].head, (WL)w); ev_stop (EV_A_ (W)w); if (!signals [w->signum - 1].head) { #if EV_MULTIPLICITY signals [w->signum - 1].loop = 0; /* unattach from signal */ #endif #if EV_USE_SIGNALFD if (sigfd >= 0) { sigset_t ss; sigemptyset (&ss); sigaddset (&ss, w->signum); sigdelset (&sigfd_set, w->signum); signalfd (sigfd, &sigfd_set, 0); sigprocmask (SIG_UNBLOCK, &ss, 0); } else #endif signal (w->signum, SIG_DFL); } EV_FREQUENT_CHECK; } #endif #if EV_CHILD_ENABLE void ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT { #if EV_MULTIPLICITY assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr)); #endif if (ecb_expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, 1); wlist_add (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w); EV_FREQUENT_CHECK; } void ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w); ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_STAT_ENABLE # ifdef _WIN32 # undef lstat # define lstat(a,b) _stati64 (a,b) # endif #define DEF_STAT_INTERVAL 5.0074891 #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */ #define MIN_STAT_INTERVAL 0.1074891 ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents); #if EV_USE_INOTIFY /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */ # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX) ecb_noinline static void infy_add (EV_P_ ev_stat *w) { w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_DONT_FOLLOW | IN_MASK_ADD); if (w->wd >= 0) { struct statfs sfs; /* now local changes will be tracked by inotify, but remote changes won't */ /* unless the filesystem is known to be local, we therefore still poll */ /* also do poll on <2.6.25, but with normal frequency */ if (!fs_2625) w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL; else if (!statfs (w->path, &sfs) && (sfs.f_type == 0x1373 /* devfs */ || sfs.f_type == 0x4006 /* fat */ || sfs.f_type == 0x4d44 /* msdos */ || sfs.f_type == 0xEF53 /* ext2/3 */ || sfs.f_type == 0x72b6 /* jffs2 */ || sfs.f_type == 0x858458f6 /* ramfs */ || sfs.f_type == 0x5346544e /* ntfs */ || sfs.f_type == 0x3153464a /* jfs */ || sfs.f_type == 0x9123683e /* btrfs */ || sfs.f_type == 0x52654973 /* reiser3 */ || sfs.f_type == 0x01021994 /* tmpfs */ || sfs.f_type == 0x58465342 /* xfs */)) w->timer.repeat = 0.; /* filesystem is local, kernel new enough */ else w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */ } else { /* can't use inotify, continue to stat */ w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL; /* if path is not there, monitor some parent directory for speedup hints */ /* note that exceeding the hardcoded path limit is not a correctness issue, */ /* but an efficiency issue only */ if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096) { char path [4096]; strcpy (path, w->path); do { int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF | (errno == EACCES ? 
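/* EACCES: the path exists but is unreadable, so watch it for attribute changes; otherwise (ENOENT) watch the parent for the name being created or moved in */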
IN_ATTRIB : IN_CREATE | IN_MOVED_TO); char *pend = strrchr (path, '/'); if (!pend || pend == path) break; *pend = 0; w->wd = inotify_add_watch (fs_fd, path, mask); } while (w->wd < 0 && (errno == ENOENT || errno == EACCES)); } } if (w->wd >= 0) wlist_add (&fs_hash [w->wd & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w); /* now re-arm timer, if required */ if (ev_is_active (&w->timer)) ev_ref (EV_A); ev_timer_again (EV_A_ &w->timer); if (ev_is_active (&w->timer)) ev_unref (EV_A); } ecb_noinline static void infy_del (EV_P_ ev_stat *w) { int slot; int wd = w->wd; if (wd < 0) return; w->wd = -2; slot = wd & ((EV_INOTIFY_HASHSIZE) - 1); wlist_del (&fs_hash [slot].head, (WL)w); /* remove this watcher, if others are watching it, they will rearm */ inotify_rm_watch (fs_fd, wd); } ecb_noinline static void infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev) { if (slot < 0) /* overflow, need to check for all hash slots */ for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot) infy_wd (EV_A_ slot, wd, ev); else { WL w_; for (w_ = fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head; w_; ) { ev_stat *w = (ev_stat *)w_; w_ = w_->next; /* lets us remove this watcher and all before it */ if (w->wd == wd || wd == -1) { if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF)) { wlist_del (&fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w); w->wd = -1; infy_add (EV_A_ w); /* re-add, no matter what */ } stat_timer_cb (EV_A_ &w->timer, 0); } } } } static void infy_cb (EV_P_ ev_io *w, int revents) { char buf [EV_INOTIFY_BUFSIZE]; int ofs; int len = read (fs_fd, buf, sizeof (buf)); for (ofs = 0; ofs < len; ) { struct inotify_event *ev = (struct inotify_event *)(buf + ofs); infy_wd (EV_A_ ev->wd, ev->wd, ev); ofs += sizeof (struct inotify_event) + ev->len; } } inline_size ecb_cold void ev_check_2625 (EV_P) { /* kernels < 2.6.25 are borked * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html */ if (ev_linux_version () < 0x020619) return; fs_2625 = 1; } inline_size int infy_newfd (void) { #if defined IN_CLOEXEC && defined IN_NONBLOCK int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK); if (fd >= 0) return fd; #endif return inotify_init (); } inline_size void infy_init (EV_P) { if (fs_fd != -2) return; fs_fd = -1; ev_check_2625 (EV_A); fs_fd = infy_newfd (); if (fs_fd >= 0) { fd_intern (fs_fd); ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ); ev_set_priority (&fs_w, EV_MAXPRI); ev_io_start (EV_A_ &fs_w); ev_unref (EV_A); } } inline_size void infy_fork (EV_P) { int slot; if (fs_fd < 0) return; ev_ref (EV_A); ev_io_stop (EV_A_ &fs_w); close (fs_fd); fs_fd = infy_newfd (); if (fs_fd >= 0) { fd_intern (fs_fd); ev_io_set (&fs_w, fs_fd, EV_READ); ev_io_start (EV_A_ &fs_w); ev_unref (EV_A); } for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot) { WL w_ = fs_hash [slot].head; fs_hash [slot].head = 0; while (w_) { ev_stat *w = (ev_stat *)w_; w_ = w_->next; /* lets us add this watcher */ w->wd = -1; if (fs_fd >= 0) infy_add (EV_A_ w); /* re-add, no matter what */ else { w->timer.repeat = w->interval ? 
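/* inotify could not be re-created after the fork, so fall back to plain stat polling */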
w->interval : DEF_STAT_INTERVAL; if (ev_is_active (&w->timer)) ev_ref (EV_A); ev_timer_again (EV_A_ &w->timer); if (ev_is_active (&w->timer)) ev_unref (EV_A); } } } } #endif #ifdef _WIN32 # define EV_LSTAT(p,b) _stati64 (p, b) #else # define EV_LSTAT(p,b) lstat (p, b) #endif void ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT { if (lstat (w->path, &w->attr) < 0) w->attr.st_nlink = 0; else if (!w->attr.st_nlink) w->attr.st_nlink = 1; } ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents) { ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); ev_statdata prev = w->attr; ev_stat_stat (EV_A_ w); /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */ if ( prev.st_dev != w->attr.st_dev || prev.st_ino != w->attr.st_ino || prev.st_mode != w->attr.st_mode || prev.st_nlink != w->attr.st_nlink || prev.st_uid != w->attr.st_uid || prev.st_gid != w->attr.st_gid || prev.st_rdev != w->attr.st_rdev || prev.st_size != w->attr.st_size || prev.st_atime != w->attr.st_atime || prev.st_mtime != w->attr.st_mtime || prev.st_ctime != w->attr.st_ctime ) { /* we only update w->prev on actual differences */ /* in case we test more often than invoke the callback, */ /* to ensure that prev is always different to attr */ w->prev = prev; #if EV_USE_INOTIFY if (fs_fd >= 0) { infy_del (EV_A_ w); infy_add (EV_A_ w); ev_stat_stat (EV_A_ w); /* avoid race... */ } #endif ev_feed_event (EV_A_ w, EV_STAT); } } void ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; ev_stat_stat (EV_A_ w); if (w->interval < MIN_STAT_INTERVAL && w->interval) w->interval = MIN_STAT_INTERVAL; ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL); ev_set_priority (&w->timer, ev_priority (w)); #if EV_USE_INOTIFY infy_init (EV_A); if (fs_fd >= 0) infy_add (EV_A_ w); else #endif { ev_timer_again (EV_A_ &w->timer); ev_unref (EV_A); } ev_start (EV_A_ (W)w, 1); EV_FREQUENT_CHECK; } void ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; #if EV_USE_INOTIFY infy_del (EV_A_ w); #endif if (ev_is_active (&w->timer)) { ev_ref (EV_A); ev_timer_stop (EV_A_ &w->timer); } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_IDLE_ENABLE void ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; pri_adjust (EV_A_ (W)w); EV_FREQUENT_CHECK; { int active = ++idlecnt [ABSPRI (w)]; ++idleall; ev_start (EV_A_ (W)w, active); array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, array_needsize_noinit); idles [ABSPRI (w)][active - 1] = w; } EV_FREQUENT_CHECK; } void ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]]; ev_active (idles [ABSPRI (w)][active - 1]) = active; ev_stop (EV_A_ (W)w); --idleall; } EV_FREQUENT_CHECK; } #endif #if EV_PREPARE_ENABLE void ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++preparecnt); array_needsize (ev_prepare *, prepares, preparemax, preparecnt, array_needsize_noinit); prepares [preparecnt - 1] = w; EV_FREQUENT_CHECK; } void ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; 
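/* removal below is O(1): the last prepare watcher is moved into the freed slot and its cached active index is fixed up */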
EV_FREQUENT_CHECK; { int active = ev_active (w); prepares [active - 1] = prepares [--preparecnt]; ev_active (prepares [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_CHECK_ENABLE void ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++checkcnt); array_needsize (ev_check *, checks, checkmax, checkcnt, array_needsize_noinit); checks [checkcnt - 1] = w; EV_FREQUENT_CHECK; } void ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); checks [active - 1] = checks [--checkcnt]; ev_active (checks [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_EMBED_ENABLE ecb_noinline void ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT { ev_run (w->other, EVRUN_NOWAIT); } static void embed_io_cb (EV_P_ ev_io *io, int revents) { ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io)); if (ev_cb (w)) ev_feed_event (EV_A_ (W)w, EV_EMBED); else ev_run (w->other, EVRUN_NOWAIT); } static void embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents) { ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare)); { EV_P = w->other; while (fdchangecnt) { fd_reify (EV_A); ev_run (EV_A_ EVRUN_NOWAIT); } } } #if EV_FORK_ENABLE static void embed_fork_cb (EV_P_ ev_fork *fork_w, int revents) { ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork)); ev_embed_stop (EV_A_ w); { EV_P = w->other; ev_loop_fork (EV_A); ev_run (EV_A_ EVRUN_NOWAIT); } ev_embed_start (EV_A_ w); } #endif #if 0 static void embed_idle_cb (EV_P_ ev_idle *idle, int revents) { ev_idle_stop (EV_A_ idle); } #endif void ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; { EV_P = w->other; assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ())); ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ); } EV_FREQUENT_CHECK; ev_set_priority (&w->io, ev_priority (w)); ev_io_start (EV_A_ &w->io); ev_prepare_init (&w->prepare, embed_prepare_cb); ev_set_priority (&w->prepare, EV_MINPRI); ev_prepare_start (EV_A_ &w->prepare); #if EV_FORK_ENABLE ev_fork_init (&w->fork, embed_fork_cb); ev_fork_start (EV_A_ &w->fork); #endif /*ev_idle_init (&w->idle, embed_idle_cb);*/ ev_start (EV_A_ (W)w, 1); EV_FREQUENT_CHECK; } void ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_io_stop (EV_A_ &w->io); ev_prepare_stop (EV_A_ &w->prepare); #if EV_FORK_ENABLE ev_fork_stop (EV_A_ &w->fork); #endif ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_FORK_ENABLE void ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++forkcnt); array_needsize (ev_fork *, forks, forkmax, forkcnt, array_needsize_noinit); forks [forkcnt - 1] = w; EV_FREQUENT_CHECK; } void ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); forks [active - 1] = forks [--forkcnt]; ev_active (forks [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_CLEANUP_ENABLE void ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK;
#if EV_FORK_ENABLE
void
ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
{
  if (ecb_expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++forkcnt);
  array_needsize (ev_fork *, forks, forkmax, forkcnt, array_needsize_noinit);
  forks [forkcnt - 1] = w;

  EV_FREQUENT_CHECK;
}

void
ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
{
  clear_pending (EV_A_ (W)w);
  if (ecb_expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    forks [active - 1] = forks [--forkcnt];
    ev_active (forks [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif

#if EV_CLEANUP_ENABLE
void
ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
{
  if (ecb_expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++cleanupcnt);
  array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, array_needsize_noinit);
  cleanups [cleanupcnt - 1] = w;

  /* cleanup watchers should never keep a refcount on the loop */
  ev_unref (EV_A);
  EV_FREQUENT_CHECK;
}

void
ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
{
  clear_pending (EV_A_ (W)w);
  if (ecb_expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;
  ev_ref (EV_A);

  {
    int active = ev_active (w);

    cleanups [active - 1] = cleanups [--cleanupcnt];
    ev_active (cleanups [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif

#if EV_ASYNC_ENABLE
void
ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
{
  if (ecb_expect_false (ev_is_active (w)))
    return;

  w->sent = 0;

  evpipe_init (EV_A);

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++asynccnt);
  array_needsize (ev_async *, asyncs, asyncmax, asynccnt, array_needsize_noinit);
  asyncs [asynccnt - 1] = w;

  EV_FREQUENT_CHECK;
}

void
ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT
{
  clear_pending (EV_A_ (W)w);
  if (ecb_expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    asyncs [active - 1] = asyncs [--asynccnt];
    ev_active (asyncs [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}

void
ev_async_send (EV_P_ ev_async *w) EV_NOEXCEPT
{
  w->sent = 1;
  evpipe_write (EV_A_ &async_pending);
}
#endif
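/* Illustrative sketch (not upstream ev.c code): ev_async_send above is the
 * one libev call that may safely be made from another thread; a typical
 * cross-thread wakeup looks like this. `wakeup_cb` and `wakeup_watcher` are
 * names local to this sketch. */
#if 0
static ev_async wakeup_watcher;

static void
wakeup_cb (EV_P_ ev_async *w, int revents)
{
  /* runs in the event loop thread after some thread called ev_async_send */
}

/* in the loop thread */
ev_async_init (&wakeup_watcher, wakeup_cb);
ev_async_start (EV_DEFAULT_ &wakeup_watcher);

/* in any other thread */
ev_async_send (EV_DEFAULT_ &wakeup_watcher);
#endif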
/*****************************************************************************/

struct ev_once
{
  ev_io io;
  ev_timer to;
  void (*cb)(int revents, void *arg);
  void *arg;
};

static void
once_cb (EV_P_ struct ev_once *once, int revents)
{
  void (*cb)(int revents, void *arg) = once->cb;
  void *arg = once->arg;

  ev_io_stop    (EV_A_ &once->io);
  ev_timer_stop (EV_A_ &once->to);
  ev_free (once);

  cb (revents, arg);
}

static void
once_cb_io (EV_P_ ev_io *w, int revents)
{
  struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io));

  once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->to));
}

static void
once_cb_to (EV_P_ ev_timer *w, int revents)
{
  struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to));

  once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->io));
}

void
ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_NOEXCEPT
{
  struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));

  once->cb  = cb;
  once->arg = arg;

  ev_init (&once->io, once_cb_io);
  if (fd >= 0)
    {
      ev_io_set (&once->io, fd, events);
      ev_io_start (EV_A_ &once->io);
    }

  ev_init (&once->to, once_cb_to);
  if (timeout >= 0.)
    {
      ev_timer_set (&once->to, timeout, 0.);
      ev_timer_start (EV_A_ &once->to);
    }
}
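/* Illustrative sketch (not upstream ev.c code): ev_once above combines a
 * one-shot io and timer watcher and fires the callback exactly once with
 * whichever event -- readiness or timeout -- happens first. `stdin_cb` is a
 * name local to this sketch. */
#if 0
static void
stdin_cb (int revents, void *arg)
{
  if (revents & EV_READ)
    ; /* stdin became readable within the 10 second timeout */
  else if (revents & EV_TIMER)
    ; /* 10 seconds elapsed without input */
}

ev_once (EV_DEFAULT_ STDIN_FILENO, EV_READ, 10., stdin_cb, 0);
#endif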
/*****************************************************************************/

#if EV_WALK_ENABLE
ecb_cold
void
ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT
{
  int i, j;
  ev_watcher_list *wl, *wn;

  if (types & (EV_IO | EV_EMBED))
    for (i = 0; i < anfdmax; ++i)
      for (wl = anfds [i].head; wl; )
        {
          wn = wl->next;

#if EV_EMBED_ENABLE
          if (ev_cb ((ev_io *)wl) == embed_io_cb)
            {
              if (types & EV_EMBED)
                cb (EV_A_ EV_EMBED, ((char *)wl) - offsetof (struct ev_embed, io));
            }
          else
#endif
#if EV_USE_INOTIFY
          if (ev_cb ((ev_io *)wl) == infy_cb)
            ;
          else
#endif
          if ((ev_io *)wl != &pipe_w)
            if (types & EV_IO)
              cb (EV_A_ EV_IO, wl);

          wl = wn;
        }

  if (types & (EV_TIMER | EV_STAT))
    for (i = timercnt + HEAP0; i-- > HEAP0; )
#if EV_STAT_ENABLE
      /*TODO: timer is not always active*/
      if (ev_cb ((ev_timer *)ANHE_w (timers [i])) == stat_timer_cb)
        {
          if (types & EV_STAT)
            cb (EV_A_ EV_STAT, ((char *)ANHE_w (timers [i])) - offsetof (struct ev_stat, timer));
        }
      else
#endif
      if (types & EV_TIMER)
        cb (EV_A_ EV_TIMER, ANHE_w (timers [i]));

#if EV_PERIODIC_ENABLE
  if (types & EV_PERIODIC)
    for (i = periodiccnt + HEAP0; i-- > HEAP0; )
      cb (EV_A_ EV_PERIODIC, ANHE_w (periodics [i]));
#endif

#if EV_IDLE_ENABLE
  if (types & EV_IDLE)
    for (j = NUMPRI; j--; )
      for (i = idlecnt [j]; i--; )
        cb (EV_A_ EV_IDLE, idles [j][i]);
#endif

#if EV_FORK_ENABLE
  if (types & EV_FORK)
    for (i = forkcnt; i--; )
      if (ev_cb (forks [i]) != embed_fork_cb)
        cb (EV_A_ EV_FORK, forks [i]);
#endif

#if EV_ASYNC_ENABLE
  if (types & EV_ASYNC)
    for (i = asynccnt; i--; )
      cb (EV_A_ EV_ASYNC, asyncs [i]);
#endif

#if EV_PREPARE_ENABLE
  if (types & EV_PREPARE)
    for (i = preparecnt; i--; )
# if EV_EMBED_ENABLE
      if (ev_cb (prepares [i]) != embed_prepare_cb)
# endif
        cb (EV_A_ EV_PREPARE, prepares [i]);
#endif

#if EV_CHECK_ENABLE
  if (types & EV_CHECK)
    for (i = checkcnt; i--; )
      cb (EV_A_ EV_CHECK, checks [i]);
#endif

#if EV_SIGNAL_ENABLE
  if (types & EV_SIGNAL)
    for (i = 0; i < EV_NSIG - 1; ++i)
      for (wl = signals [i].head; wl; )
        {
          wn = wl->next;
          cb (EV_A_ EV_SIGNAL, wl);
          wl = wn;
        }
#endif

#if EV_CHILD_ENABLE
  if (types & EV_CHILD)
    for (i = (EV_PID_HASHSIZE); i--; )
      for (wl = childs [i]; wl; )
        {
          wn = wl->next;
          cb (EV_A_ EV_CHILD, wl);
          wl = wn;
        }
#endif
/* EV_STAT     0x00001000 /* stat data changed */
/* EV_EMBED    0x00010000 /* embedded event loop needs sweep */
}
#endif

#if EV_MULTIPLICITY
  #include "ev_wrap.h"
#endif
nio4r-2.7.3/ext/libev/ev_select.c0000644000004100000410000002122514632135320016625 0ustar www-datawww-data/*
 * libev select fd activity backend
 *
 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/* the angle-bracketed header names below were stripped in this dump; they are
 * restored here as an assumption from the surrounding comments and the calls
 * (select, memcpy) the file makes */
#ifndef _WIN32
/* for unix systems */
# include <inttypes.h>
# ifndef __hpux
/* for REAL unix systems */
# include <sys/select.h>
# endif
#endif

#ifndef EV_SELECT_USE_FD_SET
# ifdef NFDBITS
#  define EV_SELECT_USE_FD_SET 0
# else
#  define EV_SELECT_USE_FD_SET 1
# endif
#endif

#if EV_SELECT_IS_WINSOCKET
# undef EV_SELECT_USE_FD_SET
# define EV_SELECT_USE_FD_SET 1
# undef NFDBITS
# define NFDBITS 0
#endif

#if !EV_SELECT_USE_FD_SET
# define NFDBYTES (NFDBITS / 8)
#endif

#include <string.h>

static void
select_modify (EV_P_ int fd, int oev, int nev)
{
  if (oev == nev)
    return;

  {
#if EV_SELECT_USE_FD_SET
    #if EV_SELECT_IS_WINSOCKET
    SOCKET handle = anfds [fd].handle;
    #else
    int handle = fd;
    #endif

    assert (("libev: fd >= FD_SETSIZE passed to fd_set-based select backend", fd < FD_SETSIZE));

    /* FD_SET is broken on windows (it adds the fd to a set twice or more,
     * which eventually leads to overflows). Need to call it only on changes. */
    #if EV_SELECT_IS_WINSOCKET
    if ((oev ^ nev) & EV_READ)
    #endif
      if (nev & EV_READ)
        FD_SET (handle, (fd_set *)vec_ri);
      else
        FD_CLR (handle, (fd_set *)vec_ri);

    #if EV_SELECT_IS_WINSOCKET
    if ((oev ^ nev) & EV_WRITE)
    #endif
      if (nev & EV_WRITE)
        FD_SET (handle, (fd_set *)vec_wi);
      else
        FD_CLR (handle, (fd_set *)vec_wi);

#else

    int word = fd / NFDBITS;
    fd_mask mask = 1UL << (fd % NFDBITS);

    if (ecb_expect_false (vec_max <= word))
      {
        int new_max = word + 1;

        vec_ri = ev_realloc (vec_ri, new_max * NFDBYTES);
        vec_ro = ev_realloc (vec_ro, new_max * NFDBYTES); /* could free/malloc */
        vec_wi = ev_realloc (vec_wi, new_max * NFDBYTES);
        vec_wo = ev_realloc (vec_wo, new_max * NFDBYTES); /* could free/malloc */
        #ifdef _WIN32
        vec_eo = ev_realloc (vec_eo, new_max * NFDBYTES); /* could free/malloc */
        #endif

        for (; vec_max < new_max; ++vec_max)
          ((fd_mask *)vec_ri) [vec_max] =
          ((fd_mask *)vec_wi) [vec_max] = 0;
      }

    ((fd_mask *)vec_ri) [word] |= mask;
    if (!(nev & EV_READ))
      ((fd_mask *)vec_ri) [word] &= ~mask;

    ((fd_mask *)vec_wi) [word] |= mask;
    if (!(nev & EV_WRITE))
      ((fd_mask *)vec_wi) [word] &= ~mask;
#endif
  }
}
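/* Worked example (illustrative, not upstream code): with the fd_mask layout
 * above and assuming NFDBITS == 32, registering fd 70 for EV_READ sets
 *   word = 70 / 32 = 2,  mask = 1UL << (70 % 32) = 1UL << 6,
 * i.e. bit 6 of ((fd_mask *)vec_ri)[2]; select_poll below recovers the fd
 * as word * NFDBITS + bit = 2 * 32 + 6 = 70. */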
static void
select_poll (EV_P_ ev_tstamp timeout)
{
  struct timeval tv;
  int res;
  int fd_setsize;

  EV_RELEASE_CB;
  EV_TV_SET (tv, timeout);

#if EV_SELECT_USE_FD_SET
  fd_setsize = sizeof (fd_set);
#else
  fd_setsize = vec_max * NFDBYTES;
#endif

  memcpy (vec_ro, vec_ri, fd_setsize);
  memcpy (vec_wo, vec_wi, fd_setsize);

#ifdef _WIN32
  /* pass in the write set as except set.
   * the idea behind this is to work around a windows bug that causes
   * errors to be reported as an exception and not by setting
   * the writable bit. this is so uncontrollably lame. */
  memcpy (vec_eo, vec_wi, fd_setsize);
  res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, (fd_set *)vec_eo, &tv);
#elif EV_SELECT_USE_FD_SET
  fd_setsize = anfdmax < FD_SETSIZE ? anfdmax : FD_SETSIZE;
  res = select (fd_setsize, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv);
#else
  res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv);
#endif
  EV_ACQUIRE_CB;

  if (ecb_expect_false (res < 0))
    {
#if EV_SELECT_IS_WINSOCKET
      errno = WSAGetLastError ();
#endif
#ifdef WSABASEERR
      /* on windows, select returns incompatible error codes, fix this */
      if (errno >= WSABASEERR && errno < WSABASEERR + 1000)
        if (errno == WSAENOTSOCK)
          errno = EBADF;
        else
          errno -= WSABASEERR;
#endif

#ifdef _WIN32
      /* select on windows erroneously returns EINVAL when no fd sets have been
       * provided (this is documented). what microsoft doesn't tell you is that this
       * bug exists even when the fd sets _are_ provided, so we have to check for this
       * bug here and emulate it by sleeping manually.
       * we also get EINVAL when the timeout is invalid, but we ignore this case here
       * and assume that EINVAL always means: you have to wait manually. */
      if (errno == EINVAL)
        {
          if (timeout)
            {
              unsigned long ms = EV_TS_TO_MSEC (timeout);
              Sleep (ms ? ms : 1);
            }

          return;
        }
#endif

      if (errno == EBADF)
        fd_ebadf (EV_A);
      else if (errno == ENOMEM && !syserr_cb)
        fd_enomem (EV_A);
      else if (errno != EINTR)
        ev_syserr ("(libev) select");

      return;
    }

#if EV_SELECT_USE_FD_SET
  {
    int fd;

    for (fd = 0; fd < anfdmax; ++fd)
      if (anfds [fd].events)
        {
          int events = 0;
          #if EV_SELECT_IS_WINSOCKET
          SOCKET handle = anfds [fd].handle;
          #else
          int handle = fd;
          #endif

          if (FD_ISSET (handle, (fd_set *)vec_ro)) events |= EV_READ;
          if (FD_ISSET (handle, (fd_set *)vec_wo)) events |= EV_WRITE;
          #ifdef _WIN32
          if (FD_ISSET (handle, (fd_set *)vec_eo)) events |= EV_WRITE;
          #endif

          if (ecb_expect_true (events))
            fd_event (EV_A_ fd, events);
        }
  }
#else
  {
    int word, bit;

    for (word = vec_max; word--; )
      {
        fd_mask word_r = ((fd_mask *)vec_ro) [word];
        fd_mask word_w = ((fd_mask *)vec_wo) [word];
#ifdef _WIN32
        word_w |= ((fd_mask *)vec_eo) [word];
#endif

        if (word_r || word_w)
          for (bit = NFDBITS; bit--; )
            {
              fd_mask mask = 1UL << bit;
              int events = 0;

              events |= word_r & mask ? EV_READ  : 0;
              events |= word_w & mask ? EV_WRITE : 0;

              if (ecb_expect_true (events))
                fd_event (EV_A_ word * NFDBITS + bit, events);
            }
      }
  }
#endif
}

inline_size int
select_init (EV_P_ int flags)
{
  backend_mintime = EV_TS_CONST (1e-6);
  backend_modify  = select_modify;
  backend_poll    = select_poll;

#if EV_SELECT_USE_FD_SET
  vec_ri = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_ri);
  vec_ro = ev_malloc (sizeof (fd_set));
  vec_wi = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_wi);
  vec_wo = ev_malloc (sizeof (fd_set));
#ifdef _WIN32
  vec_eo = ev_malloc (sizeof (fd_set));
#endif
#else
  vec_max = 0;
  vec_ri  = 0;
  vec_ro  = 0;
  vec_wi  = 0;
  vec_wo  = 0;
#ifdef _WIN32
  vec_eo  = 0;
#endif
#endif

  return EVBACKEND_SELECT;
}

inline_size void
select_destroy (EV_P)
{
  ev_free (vec_ri);
  ev_free (vec_ro);
  ev_free (vec_wi);
  ev_free (vec_wo);
#ifdef _WIN32
  ev_free (vec_eo);
#endif
}
nio4r-2.7.3/ext/libev/ev_linuxaio.c0000644000004100000410000005176614632135320017203 0ustar www-datawww-data/*
 * libev linux aio fd activity backend
 *
 * Copyright (c) 2019 Marc Alexander Lehmann
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/*
 * general notes about linux aio:
 *
 * a) at first, the linux aio IOCB_CMD_POLL functionality introduced in
 *    4.18 looks too good to be true: both watchers and events can be
 *    batched, and events can even be handled in userspace using
 *    a ring buffer shared with the kernel. watchers can be canceled
 *    regardless of whether the fd has been closed. no problems with fork.
 *    ok, the ring buffer is 200% undocumented (there isn't even a
 *    header file), but otherwise, it's pure bliss!
 * b) ok, watchers are one-shot, so you have to re-arm active ones
 *    on every iteration. so much for syscall-less event handling,
 *    but at least these re-arms can be batched, no big deal, right?
 * c) well, linux as usual: the documentation lies to you: io_submit
 *    sometimes returns EINVAL because the kernel doesn't feel like
 *    handling your poll mask - ttys can be polled for POLLOUT,
 *    POLLOUT|POLLIN, but polling for POLLIN fails. just great,
 *    so we have to fall back to something else (hello, epoll),
 *    but at least the fallback can be slow, because these are
 *    exceptional cases, right?
 * d) hmm, you have to tell the kernel the maximum number of watchers
 *    you want to queue when initialising the aio context. but of
 *    course the real limit is magically calculated in the kernel, and
 *    is often higher than we asked for. so we just have to destroy
 *    the aio context and re-create it a bit larger if we hit the limit.
 *    (starts to remind you of epoll? well, it's a bit more deterministic
 *    and less gambling, but still ugly as hell).
 * e) that's when you find out you can also hit an arbitrary system-wide
 *    limit. or the kernel simply doesn't want to handle your watchers.
 *    what the fuck do we do then? you guessed it, in the middle
 *    of event handling we have to switch to 100% epoll polling. and
 *    that better is as fast as normal epoll polling, so you practically
 *    have to use the normal epoll backend with all its quirks.
 * f) end result of this train wreck: it inherits all the disadvantages
 *    from epoll, while adding a number on its own. why even bother to use
 *    it? because if conditions are right and your fds are supported and you
 *    don't hit a limit, this backend is actually faster, doesn't gamble with
 *    your fds, batches watchers and events and doesn't require costly state
 *    recreates. well, until it does.
 * g) all of this makes this backend use almost twice as much code as epoll.
 *    which in turn uses twice as much code as poll. and that's not counting
 *    the fact that this backend also depends on the epoll backend, making
 *    it three times as much code as poll, or kqueue.
 * h) bleah. why can't linux just do kqueue. sure kqueue is ugly, but by now
 *    it's clear that whatever linux comes up with is far, far, far worse.
 */
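/* Illustrative sketch (not upstream code): the bare IOCB_CMD_POLL round trip
 * described in notes a)-c) above, using raw syscalls since glibc provides no
 * wrappers. Error handling is omitted and `fd` stands for any pollable fd. */
#if 0
aio_context_t ctx = 0;
struct iocb cb = { 0 };
struct iocb *cbs[1] = { &cb };
struct io_event e;

syscall (SYS_io_setup, 32, &ctx);              /* room for 32 in-flight events */
cb.aio_lio_opcode = IOCB_CMD_POLL;
cb.aio_fildes     = fd;
cb.aio_buf        = POLLIN;                    /* the poll mask travels in aio_buf */
syscall (SYS_io_submit, ctx, 1, cbs);          /* may fail with EINVAL, see c) above */
syscall (SYS_io_getevents, ctx, 1, 1, &e, 0);  /* e.res holds the revents; one-shot, see b) */
syscall (SYS_io_destroy, ctx);
#endif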
/* the angle-bracketed header names below were stripped in this dump; they are
 * restored here as an assumption from the surrounding comments */
#include <sys/time.h> /* actually linux/time.h, but we must assume they are compatible */
#include <poll.h>
#include <linux/aio_abi.h>

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */

#include <sys/syscall.h> /* no glibc wrappers */

/* aio_abi.h is not versioned in any way, so we cannot test for its existence */
#define IOCB_CMD_POLL 5

/* taken from linux/fs/aio.c. yup, that's a .c file.
 * not only is this totally undocumented, not even the source code
 * can tell you what the future semantics of compat_features and
 * incompat_features are, or what header_length actually is for. */
#define AIO_RING_MAGIC                  0xa10a10a1
#define EV_AIO_RING_INCOMPAT_FEATURES   0
struct aio_ring
{
  unsigned id;    /* kernel internal index number */
  unsigned nr;    /* number of io_events */
  unsigned head;  /* Written to by userland or by kernel. */
  unsigned tail;

  unsigned magic;
  unsigned compat_features;
  unsigned incompat_features;
  unsigned header_length; /* size of aio_ring */

  struct io_event io_events[0];
};

inline_size
int
evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp)
{
  return ev_syscall2 (SYS_io_setup, nr_events, ctx_idp);
}

inline_size
int
evsys_io_destroy (aio_context_t ctx_id)
{
  return ev_syscall1 (SYS_io_destroy, ctx_id);
}

inline_size
int
evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[])
{
  return ev_syscall3 (SYS_io_submit, ctx_id, nr, cbp);
}

inline_size
int
evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result)
{
  return ev_syscall3 (SYS_io_cancel, ctx_id, cbp, result);
}

inline_size
int
evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout)
{
  return ev_syscall5 (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout);
}

/*****************************************************************************/
/* actual backend implementation */

ecb_cold
static int
linuxaio_nr_events (EV_P)
{
  /* we start with 16 iocbs and increase from there
   * that's tiny, but the kernel has a rather low system-wide
   * limit that can be reached quickly, so let's be parsimonious
   * with this resource.
   * Rest assured, the kernel generously rounds up small and big numbers
   * in different ways (but doesn't seem to charge you for it).
   * The 15 here is because the kernel usually has a power of two as aio-max-nr,
   * and this helps to take advantage of that limit. */

  /* we try to fill 4kB pages exactly.
   * the ring buffer header is 32 bytes, every io event is 32 bytes.
   * the kernel takes the io requests number, doubles it, adds 2
   * and adds the ring buffer.
   * the way we use this is by starting low, and then roughly doubling the
   * size each time we hit a limit. */

  int requests   = 15 << linuxaio_iteration;
  int one_page   =  (4096 / sizeof (struct io_event)                       ) / 2; /* how many fit into one page */
  int first_page = ((4096 - sizeof (struct aio_ring)) / sizeof (struct io_event) - 2) / 2; /* how many fit into the first page */

  /* if everything fits into one page, use count exactly */
  if (requests > first_page)
    /* otherwise, round down to full pages and add the first page */
    requests = requests / one_page * one_page + first_page;

  return requests;
}
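/* Worked example (illustrative, not upstream code) of the sizing above:
 * io_events are 32 bytes and the aio_ring header is 32 bytes, so
 *   one_page   = (4096 / 32) / 2            = 64
 *   first_page = ((4096 - 32) / 32 - 2) / 2 = 62.
 * Iteration 0 asks for 15 requests, which fits in the first page and is
 * used as-is; by iteration 3, 15 << 3 = 120 > 62, which is rounded to
 * 120 / 64 * 64 + 62 = 126 requests. */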
/* we use our own wrapper structure in case we ever want to do something "clever" */
typedef struct aniocb
{
  struct iocb io;
  /*int inuse;*/
} *ANIOCBP;

inline_size
void
linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count)
{
  while (count--)
    {
      /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */
      ANIOCBP iocb = (ANIOCBP)ev_malloc (sizeof (*iocb));

      /* full zero initialise is probably not required at the moment, but
       * this is not well documented, so we better do it. */
      memset (iocb, 0, sizeof (*iocb));

      iocb->io.aio_lio_opcode = IOCB_CMD_POLL;
      iocb->io.aio_fildes     = offset;

      base [offset++] = iocb;
    }
}

ecb_cold
static void
linuxaio_free_iocbp (EV_P)
{
  while (linuxaio_iocbpmax--)
    ev_free (linuxaio_iocbps [linuxaio_iocbpmax]);

  linuxaio_iocbpmax = 0; /* next resize will completely reallocate the array, at some overhead */
}

static void
linuxaio_modify (EV_P_ int fd, int oev, int nev)
{
  array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp);
  ANIOCBP iocb = linuxaio_iocbps [fd];
  ANFD *anfd = &anfds [fd];

  if (ecb_expect_false (iocb->io.aio_reqprio < 0))
    {
      /* we handed this fd over to epoll, so undo this first */
      /* we do it manually because the optimisations on epoll_modify won't do us any good */
      epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
      anfd->emask = 0;
      iocb->io.aio_reqprio = 0;
    }
  else if (ecb_expect_false (iocb->io.aio_buf))
    {
      /* iocb active, so cancel it first before resubmit */
      /* this assumes we only ever get one call per fd per loop iteration */
      for (;;)
        {
          /* on all relevant kernels, io_cancel fails with EINPROGRESS on "success" */
          if (ecb_expect_false (evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0) == 0))
            break;

          if (ecb_expect_true (errno == EINPROGRESS))
            break;

          /* the EINPROGRESS test is for nicer error message. clumsy. */
          if (errno != EINTR)
            {
              assert (("libev: linuxaio unexpected io_cancel failed", errno != EINTR && errno != EINPROGRESS));
              break;
            }
        }

      /* increment generation counter to avoid handling old events */
      ++anfd->egen;
    }

  iocb->io.aio_buf = (nev & EV_READ ? POLLIN : 0)
                   | (nev & EV_WRITE ?
POLLOUT : 0); if (nev) { iocb->io.aio_data = (uint32_t)fd | ((__u64)(uint32_t)anfd->egen << 32); /* queue iocb up for io_submit */ /* this assumes we only ever get one call per fd per loop iteration */ ++linuxaio_submitcnt; array_needsize (struct iocb *, linuxaio_submits, linuxaio_submitmax, linuxaio_submitcnt, array_needsize_noinit); linuxaio_submits [linuxaio_submitcnt - 1] = &iocb->io; } } static void linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents) { epoll_poll (EV_A_ 0); } inline_speed void linuxaio_fd_rearm (EV_P_ int fd) { anfds [fd].events = 0; linuxaio_iocbps [fd]->io.aio_buf = 0; fd_change (EV_A_ fd, EV_ANFD_REIFY); } static void linuxaio_parse_events (EV_P_ struct io_event *ev, int nr) { while (nr) { int fd = ev->data & 0xffffffff; uint32_t gen = ev->data >> 32; int res = ev->res; assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax)); /* only accept events if generation counter matches */ if (ecb_expect_true (gen == (uint32_t)anfds [fd].egen)) { /* feed events, we do not expect or handle POLLNVAL */ fd_event ( EV_A_ fd, (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) ); /* linux aio is oneshot: rearm fd. TODO: this does more work than strictly needed */ linuxaio_fd_rearm (EV_A_ fd); } --nr; ++ev; } } /* get any events from ring buffer, return true if any were handled */ static int linuxaio_get_events_from_ring (EV_P) { struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx; unsigned head, tail; /* the kernel reads and writes both of these variables, */ /* as a C extension, we assume that volatile use here */ /* both makes reads atomic and once-only */ head = *(volatile unsigned *)&ring->head; ECB_MEMORY_FENCE_ACQUIRE; tail = *(volatile unsigned *)&ring->tail; if (head == tail) return 0; /* parse all available events, but only once, to avoid starvation */ if (ecb_expect_true (tail > head)) /* normal case around */ linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head); else /* wrapped around */ { linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head); linuxaio_parse_events (EV_A_ ring->io_events, tail); } ECB_MEMORY_FENCE_RELEASE; /* as an extension to C, we hope that the volatile will make this atomic and once-only */ *(volatile unsigned *)&ring->head = tail; return 1; } inline_size int linuxaio_ringbuf_valid (EV_P) { struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx; return ecb_expect_true (ring->magic == AIO_RING_MAGIC) && ring->incompat_features == EV_AIO_RING_INCOMPAT_FEATURES && ring->header_length == sizeof (struct aio_ring); /* TODO: or use it to find io_event[0]? 
*/
}

/* read at least one event from kernel, or timeout */
inline_size
void
linuxaio_get_events (EV_P_ ev_tstamp timeout)
{
  struct timespec ts;
  struct io_event ioev[8]; /* 256 octet stack space */
  int want = 1; /* how many events to request */
  int ringbuf_valid = linuxaio_ringbuf_valid (EV_A);

  if (ecb_expect_true (ringbuf_valid))
    {
      /* if the ring buffer has any events, we don't wait or call the kernel at all */
      if (linuxaio_get_events_from_ring (EV_A))
        return;

      /* if the ring buffer is empty, and we don't have a timeout, then don't call the kernel */
      if (!timeout)
        return;
    }
  else
    /* no ringbuffer, request slightly larger batch */
    want = sizeof (ioev) / sizeof (ioev [0]);

  /* no events, so wait for some
   * for fairness reasons, we do this in a loop, to fetch all events */
  for (;;)
    {
      int res;

      EV_RELEASE_CB;

      EV_TS_SET (ts, timeout);
      res = evsys_io_getevents (linuxaio_ctx, 1, want, ioev, &ts);

      EV_ACQUIRE_CB;

      if (res < 0)
        if (errno == EINTR)
          /* ignored, retry */;
        else
          ev_syserr ("(libev) linuxaio io_getevents");
      else if (res)
        {
          /* at least one event available, handle them */
          linuxaio_parse_events (EV_A_ ioev, res);

          if (ecb_expect_true (ringbuf_valid))
            {
              /* if we have a ring buffer, handle any remaining events in it */
              linuxaio_get_events_from_ring (EV_A);

              /* at this point, we should have handled all outstanding events */
              break;
            }
          else if (res < want)
            /* otherwise, if there were fewer events than we wanted, we assume there are no more */
            break;
        }
      else
        break; /* no events from the kernel, we are done */

      timeout = EV_TS_CONST (0.); /* only wait in the first iteration */
    }
}

inline_size
int
linuxaio_io_setup (EV_P)
{
  linuxaio_ctx = 0;
  return evsys_io_setup (linuxaio_nr_events (EV_A), &linuxaio_ctx);
}
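/* Worked example (illustrative, not upstream code) of the ring read in
 * linuxaio_get_events_from_ring above: with ring->nr == 128, head == 120 and
 * tail == 10, the tail has wrapped around, so events are parsed in two runs
 * -- io_events + 120 for 128 - 120 = 8 events, then io_events + 0 for 10 --
 * after which head is advanced to tail in a single volatile store. */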
static void
linuxaio_poll (EV_P_ ev_tstamp timeout)
{
  int submitted;

  /* first phase: submit new iocbs */

  /* io_submit might return less than the requested number of iocbs */
  /* this is, afaics, only because of errors, but we go by the book and use a loop, */
  /* which allows us to pinpoint the erroneous iocb */
  for (submitted = 0; submitted < linuxaio_submitcnt; )
    {
      int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted);

      if (ecb_expect_false (res < 0))
        if (errno == EINVAL)
          {
            /* This happens for unsupported fds, officially, but in my testing,
             * also randomly happens for supported fds. We fall back to good old
             * poll() here, under the assumption that this is a very rare case.
             * See https://lore.kernel.org/patchwork/patch/1047453/ to see
             * discussion about such a case (ttys) where polling for POLLIN
             * fails but POLLIN|POLLOUT works. */
            struct iocb *iocb = linuxaio_submits [submitted];
            epoll_modify (EV_A_ iocb->aio_fildes, 0, anfds [iocb->aio_fildes].events);
            iocb->aio_reqprio = -1; /* mark iocb as epoll */

            res = 1; /* skip this iocb - another iocb, another chance */
          }
        else if (errno == EAGAIN)
          {
            /* This happens when the ring buffer is full, or some other shit we
             * don't know and isn't documented. Most likely because we have too
             * many requests and linux aio can't be assed to handle them.
             * In this case, we try to allocate a larger ring buffer, freeing
             * ours first. This might fail, in which case we have to fall back to 100%
             * epoll.
             * God, how I hate linux not getting its act together. Ever. */
            evsys_io_destroy (linuxaio_ctx);
            linuxaio_submitcnt = 0;

            /* rearm all fds with active iocbs */
            {
              int fd;
              for (fd = 0; fd < linuxaio_iocbpmax; ++fd)
                if (linuxaio_iocbps [fd]->io.aio_buf)
                  linuxaio_fd_rearm (EV_A_ fd);
            }

            ++linuxaio_iteration;
            if (linuxaio_io_setup (EV_A) < 0)
              {
                /* TODO: rearm all and recreate epoll backend from scratch */
                /* TODO: might be more prudent? */

                /* too bad, we can't get a new aio context, go 100% epoll */
                linuxaio_free_iocbp (EV_A);
                ev_io_stop (EV_A_ &linuxaio_epoll_w);
                ev_ref (EV_A);
                linuxaio_ctx = 0;

                backend        = EVBACKEND_EPOLL;
                backend_modify = epoll_modify;
                backend_poll   = epoll_poll;
              }

            timeout = EV_TS_CONST (0.);
            /* it's easiest to handle this mess in another iteration */
            return;
          }
        else if (errno == EBADF)
          {
            assert (("libev: event loop rejected bad fd", errno != EBADF));
            fd_kill (EV_A_ linuxaio_submits [submitted]->aio_fildes);

            res = 1; /* skip this iocb */
          }
        else if (errno == EINTR) /* not seen in reality, not documented */
          res = 0; /* silently ignore and retry */
        else
          {
            ev_syserr ("(libev) linuxaio io_submit");
            res = 0;
          }

      submitted += res;
    }

  linuxaio_submitcnt = 0;

  /* second phase: fetch and parse events */

  linuxaio_get_events (EV_A_ timeout);
}

inline_size
int
linuxaio_init (EV_P_ int flags)
{
  /* would be great to have a nice test for IOCB_CMD_POLL instead */
  /* also: test some semi-common fd types, such as files and ttys in recommended_backends */
  /* 4.18 introduced IOCB_CMD_POLL, 4.19 made epoll work, and we need that */
  if (ev_linux_version () < 0x041300)
    return 0;

  if (!epoll_init (EV_A_ 0))
    return 0;

  linuxaio_iteration = 0;

  if (linuxaio_io_setup (EV_A) < 0)
    {
      epoll_destroy (EV_A);
      return 0;
    }

  ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
  ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = linuxaio_modify;
  backend_poll   = linuxaio_poll;

  linuxaio_iocbpmax = 0;
  linuxaio_iocbps = 0;

  linuxaio_submits = 0;
  linuxaio_submitmax = 0;
  linuxaio_submitcnt = 0;

  return EVBACKEND_LINUXAIO;
}

inline_size
void
linuxaio_destroy (EV_P)
{
  epoll_destroy (EV_A);
  linuxaio_free_iocbp (EV_A);
  evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */
}

ecb_cold
static void
linuxaio_fork (EV_P)
{
  linuxaio_submitcnt = 0; /* all pointers were invalidated */
  linuxaio_free_iocbp (EV_A); /* this frees all iocbs, which is very heavy-handed */
  evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */

  linuxaio_iteration = 0; /* we start over in the child */

  while (linuxaio_io_setup (EV_A) < 0)
    ev_syserr ("(libev) linuxaio io_setup");

  /* forking epoll should also effectively unregister all fds from the backend */
  epoll_fork (EV_A);
  /* epoll_fork already did this.
hopefully */ /*fd_rearm_all (EV_A);*/ ev_io_stop (EV_A_ &linuxaio_epoll_w); ev_io_set (EV_A_ &linuxaio_epoll_w, backend_fd, EV_READ); ev_io_start (EV_A_ &linuxaio_epoll_w); } nio4r-2.7.3/ext/libev/ev_wrap.h0000644000004100000410000002006014632135320016320 0ustar www-datawww-data/* DO NOT EDIT, automatically generated by update_ev_wrap */ #ifndef EV_WRAP_H #define EV_WRAP_H #define acquire_cb ((loop)->acquire_cb) #define activecnt ((loop)->activecnt) #define anfdmax ((loop)->anfdmax) #define anfds ((loop)->anfds) #define async_pending ((loop)->async_pending) #define asynccnt ((loop)->asynccnt) #define asyncmax ((loop)->asyncmax) #define asyncs ((loop)->asyncs) #define backend ((loop)->backend) #define backend_fd ((loop)->backend_fd) #define backend_mintime ((loop)->backend_mintime) #define backend_modify ((loop)->backend_modify) #define backend_poll ((loop)->backend_poll) #define checkcnt ((loop)->checkcnt) #define checkmax ((loop)->checkmax) #define checks ((loop)->checks) #define cleanupcnt ((loop)->cleanupcnt) #define cleanupmax ((loop)->cleanupmax) #define cleanups ((loop)->cleanups) #define curpid ((loop)->curpid) #define epoll_epermcnt ((loop)->epoll_epermcnt) #define epoll_epermmax ((loop)->epoll_epermmax) #define epoll_eperms ((loop)->epoll_eperms) #define epoll_eventmax ((loop)->epoll_eventmax) #define epoll_events ((loop)->epoll_events) #define evpipe ((loop)->evpipe) #define fdchangecnt ((loop)->fdchangecnt) #define fdchangemax ((loop)->fdchangemax) #define fdchanges ((loop)->fdchanges) #define forkcnt ((loop)->forkcnt) #define forkmax ((loop)->forkmax) #define forks ((loop)->forks) #define fs_2625 ((loop)->fs_2625) #define fs_fd ((loop)->fs_fd) #define fs_hash ((loop)->fs_hash) #define fs_w ((loop)->fs_w) #define idleall ((loop)->idleall) #define idlecnt ((loop)->idlecnt) #define idlemax ((loop)->idlemax) #define idles ((loop)->idles) #define invoke_cb ((loop)->invoke_cb) #define io_blocktime ((loop)->io_blocktime) #define iocp ((loop)->iocp) #define iouring_cq_cqes ((loop)->iouring_cq_cqes) #define iouring_cq_head ((loop)->iouring_cq_head) #define iouring_cq_overflow ((loop)->iouring_cq_overflow) #define iouring_cq_ring ((loop)->iouring_cq_ring) #define iouring_cq_ring_entries ((loop)->iouring_cq_ring_entries) #define iouring_cq_ring_mask ((loop)->iouring_cq_ring_mask) #define iouring_cq_ring_size ((loop)->iouring_cq_ring_size) #define iouring_cq_tail ((loop)->iouring_cq_tail) #define iouring_entries ((loop)->iouring_entries) #define iouring_fd ((loop)->iouring_fd) #define iouring_max_entries ((loop)->iouring_max_entries) #define iouring_sq_array ((loop)->iouring_sq_array) #define iouring_sq_dropped ((loop)->iouring_sq_dropped) #define iouring_sq_flags ((loop)->iouring_sq_flags) #define iouring_sq_head ((loop)->iouring_sq_head) #define iouring_sq_ring ((loop)->iouring_sq_ring) #define iouring_sq_ring_entries ((loop)->iouring_sq_ring_entries) #define iouring_sq_ring_mask ((loop)->iouring_sq_ring_mask) #define iouring_sq_ring_size ((loop)->iouring_sq_ring_size) #define iouring_sq_tail ((loop)->iouring_sq_tail) #define iouring_sqes ((loop)->iouring_sqes) #define iouring_sqes_size ((loop)->iouring_sqes_size) #define iouring_tfd ((loop)->iouring_tfd) #define iouring_tfd_to ((loop)->iouring_tfd_to) #define iouring_tfd_w ((loop)->iouring_tfd_w) #define iouring_to_submit ((loop)->iouring_to_submit) #define kqueue_changecnt ((loop)->kqueue_changecnt) #define kqueue_changemax ((loop)->kqueue_changemax) #define kqueue_changes ((loop)->kqueue_changes) #define kqueue_eventmax 
((loop)->kqueue_eventmax) #define kqueue_events ((loop)->kqueue_events) #define kqueue_fd_pid ((loop)->kqueue_fd_pid) #define linuxaio_ctx ((loop)->linuxaio_ctx) #define linuxaio_epoll_w ((loop)->linuxaio_epoll_w) #define linuxaio_iocbpmax ((loop)->linuxaio_iocbpmax) #define linuxaio_iocbps ((loop)->linuxaio_iocbps) #define linuxaio_iteration ((loop)->linuxaio_iteration) #define linuxaio_submitcnt ((loop)->linuxaio_submitcnt) #define linuxaio_submitmax ((loop)->linuxaio_submitmax) #define linuxaio_submits ((loop)->linuxaio_submits) #define loop_count ((loop)->loop_count) #define loop_depth ((loop)->loop_depth) #define loop_done ((loop)->loop_done) #define mn_now ((loop)->mn_now) #define now_floor ((loop)->now_floor) #define origflags ((loop)->origflags) #define pending_w ((loop)->pending_w) #define pendingcnt ((loop)->pendingcnt) #define pendingmax ((loop)->pendingmax) #define pendingpri ((loop)->pendingpri) #define pendings ((loop)->pendings) #define periodiccnt ((loop)->periodiccnt) #define periodicmax ((loop)->periodicmax) #define periodics ((loop)->periodics) #define pipe_w ((loop)->pipe_w) #define pipe_write_skipped ((loop)->pipe_write_skipped) #define pipe_write_wanted ((loop)->pipe_write_wanted) #define pollcnt ((loop)->pollcnt) #define pollidxmax ((loop)->pollidxmax) #define pollidxs ((loop)->pollidxs) #define pollmax ((loop)->pollmax) #define polls ((loop)->polls) #define port_eventmax ((loop)->port_eventmax) #define port_events ((loop)->port_events) #define postfork ((loop)->postfork) #define preparecnt ((loop)->preparecnt) #define preparemax ((loop)->preparemax) #define prepares ((loop)->prepares) #define release_cb ((loop)->release_cb) #define rfeedcnt ((loop)->rfeedcnt) #define rfeedmax ((loop)->rfeedmax) #define rfeeds ((loop)->rfeeds) #define rtmn_diff ((loop)->rtmn_diff) #define sig_pending ((loop)->sig_pending) #define sigfd ((loop)->sigfd) #define sigfd_set ((loop)->sigfd_set) #define sigfd_w ((loop)->sigfd_w) #define timeout_blocktime ((loop)->timeout_blocktime) #define timercnt ((loop)->timercnt) #define timerfd ((loop)->timerfd) #define timerfd_w ((loop)->timerfd_w) #define timermax ((loop)->timermax) #define timers ((loop)->timers) #define userdata ((loop)->userdata) #define vec_eo ((loop)->vec_eo) #define vec_max ((loop)->vec_max) #define vec_ri ((loop)->vec_ri) #define vec_ro ((loop)->vec_ro) #define vec_wi ((loop)->vec_wi) #define vec_wo ((loop)->vec_wo) #else #undef EV_WRAP_H #undef acquire_cb #undef activecnt #undef anfdmax #undef anfds #undef async_pending #undef asynccnt #undef asyncmax #undef asyncs #undef backend #undef backend_fd #undef backend_mintime #undef backend_modify #undef backend_poll #undef checkcnt #undef checkmax #undef checks #undef cleanupcnt #undef cleanupmax #undef cleanups #undef curpid #undef epoll_epermcnt #undef epoll_epermmax #undef epoll_eperms #undef epoll_eventmax #undef epoll_events #undef evpipe #undef fdchangecnt #undef fdchangemax #undef fdchanges #undef forkcnt #undef forkmax #undef forks #undef fs_2625 #undef fs_fd #undef fs_hash #undef fs_w #undef idleall #undef idlecnt #undef idlemax #undef idles #undef invoke_cb #undef io_blocktime #undef iocp #undef iouring_cq_cqes #undef iouring_cq_head #undef iouring_cq_overflow #undef iouring_cq_ring #undef iouring_cq_ring_entries #undef iouring_cq_ring_mask #undef iouring_cq_ring_size #undef iouring_cq_tail #undef iouring_entries #undef iouring_fd #undef iouring_max_entries #undef iouring_sq_array #undef iouring_sq_dropped #undef iouring_sq_flags #undef iouring_sq_head #undef 
iouring_sq_ring #undef iouring_sq_ring_entries #undef iouring_sq_ring_mask #undef iouring_sq_ring_size #undef iouring_sq_tail #undef iouring_sqes #undef iouring_sqes_size #undef iouring_tfd #undef iouring_tfd_to #undef iouring_tfd_w #undef iouring_to_submit #undef kqueue_changecnt #undef kqueue_changemax #undef kqueue_changes #undef kqueue_eventmax #undef kqueue_events #undef kqueue_fd_pid #undef linuxaio_ctx #undef linuxaio_epoll_w #undef linuxaio_iocbpmax #undef linuxaio_iocbps #undef linuxaio_iteration #undef linuxaio_submitcnt #undef linuxaio_submitmax #undef linuxaio_submits #undef loop_count #undef loop_depth #undef loop_done #undef mn_now #undef now_floor #undef origflags #undef pending_w #undef pendingcnt #undef pendingmax #undef pendingpri #undef pendings #undef periodiccnt #undef periodicmax #undef periodics #undef pipe_w #undef pipe_write_skipped #undef pipe_write_wanted #undef pollcnt #undef pollidxmax #undef pollidxs #undef pollmax #undef polls #undef port_eventmax #undef port_events #undef postfork #undef preparecnt #undef preparemax #undef prepares #undef release_cb #undef rfeedcnt #undef rfeedmax #undef rfeeds #undef rtmn_diff #undef sig_pending #undef sigfd #undef sigfd_set #undef sigfd_w #undef timeout_blocktime #undef timercnt #undef timerfd #undef timerfd_w #undef timermax #undef timers #undef userdata #undef vec_eo #undef vec_max #undef vec_ri #undef vec_ro #undef vec_wi #undef vec_wo #endif nio4r-2.7.3/changes.md0000644000004100000410000002022614632135320014541 0ustar www-datawww-data## 2.7.2 * Modernize gem (list all authors, etc). * Drop official support for Ruby 2.4. * Fix JRuby release version. ## 2.7.1 * Fix license specification. * Fix JRuby build warnings. ## 2.7.0 * Convert NIO objects to TypedData API. ## 2.6.1 * Don't update `io` which is subsequently stored. Retain the original. ## 2.6.0 * Fix conversion loses int precision. * Avoid direct access to IO internals. * Resolve issue loading both nio and nio4r gems. ## 2.5.9 (2023-04-02) https://github.com/socketry/nio4r/compare/v2.5.8..v2.5.9 ## 2.5.8 (2021-08-03) * [#276](https://github.com/socketry/nio4r/pull/276) Fix missing return statement in function returning non-void (issue [#275](https://github.com/socketry/nio4r/pull/275)) ([@ioquatix]) * Remove `guard-rspec` from development dependencies ([@ioquatix]) ## 2.5.7 (2021-03-04) * [#267](https://github.com/socketry/nio4r/pull/267) Don't try to link universal extension ([@ioquatix]) ## 2.5.6 (2021-03-04) * [#268](https://github.com/socketry/nio4r/pull/268) Prefer kqueue when on OSX >= v10.12.2 ([@jcmfernandes]) ## 2.5.5 (2021-02-05) * [#256](https://github.com/socketry/nio4r/pull/256) Use libev 4.33, featuring experimental `io_uring` support. ([@jcmfernandes]) * [#260](https://github.com/socketry/nio4r/pull/260) Workaround for ARM-based macOS Ruby: Use pure Ruby for M1, since the native extension is crashing on M1 (arm64). ([@jasl]) * [#252](https://github.com/socketry/nio4r/pull/252) JRuby: Fix javac -Xlint warnings ([@headius]) ## 2.5.4 (2020-09-16) * [#251](https://github.com/socketry/nio4r/issues/251) Intermittent SEGV during GC. ([@boazsegev]) ## 2.5.3 (2020-09-07) * [#241](https://github.com/socketry/nio4r/issues/241) Possible bug with Ruby >= 2.7.0 and `GC.compact`. ([@boazsegev]) ## 2.5.2 (2019-09-24) * [#220](https://github.com/socketry/nio4r/issues/220) Update to libev-4.27 & fix assorted warnings. ([@ioquatix]) * [#225](https://github.com/socketry/nio4r/issues/225) Avoid need for linux headers. 
([@ioquatix])

## 2.4.0 (2019-07-07)

* [#211](https://github.com/socketry/nio4r/pull/211) Enable KQUEUE on macOS 10.14+. ([@ioquatix])
* Bump minimum supported Ruby to 2.3. ([@ioquatix])
* Assorted fixes for TruffleRuby & JRuby. ([@eregon], [@olleolleolle])
* Update libev to v4.25. ([@ioquatix])
* Bind to ephemeral (port 0) for more reliable specs. ([@ioquatix])
* Improve handling of SSL sockets and related specs. ([@MSP-Greg])

## 2.3.1 (2018-05-03)

* [#188](https://github.com/socketry/nio4r/pull/188) Fix remove interests ([@ioquatix])

## 2.3.0 (2018-03-15)

* [#183](https://github.com/socketry/nio4r/pull/183) Allow `Monitor#interests` to be nil ([@ioquatix])

## 2.2.0 (2017-12-27)

* [#151](https://github.com/socketry/nio4r/pull/151) `NIO::Selector`: Support for enumerating and configuring backend ([@tarcieri])
* [#153](https://github.com/socketry/nio4r/pull/153) Fix builds on Windows ([@unak])
* [#157](https://github.com/socketry/nio4r/pull/157) Windows / MinGW test failure - fix spec_helper.rb ([@MSP-Greg])
* [#162](https://github.com/socketry/nio4r/pull/162) Don't build the C extension on Windows ([@larskanis])
* [#164](https://github.com/socketry/nio4r/pull/164) Fix NIO::ByteBuffer leak ([@HoneyryderChuck])
* [#170](https://github.com/socketry/nio4r/pull/170) Avoid CancelledKeyExceptions on JRuby ([@HoneyryderChuck])
* [#177](https://github.com/socketry/nio4r/pull/177) Fix `NIO::ByteBuffer` string conversions on JRuby ([@tarcieri])
* [#179](https://github.com/socketry/nio4r/pull/179) Fix argument error when running on ruby 2.5.0 ([@tompng])
* [#180](https://github.com/socketry/nio4r/pull/180) ext/nio4r/extconf.rb: check for port_event_t in port.h (fixes #178) ([@tarcieri])

## 2.1.0 (2017-05-28)

* [#130](https://github.com/socketry/nio4r/pull/130) Add -fno-strict-aliasing flag when compiling C ext. ([@junaruga])
* [#146](https://github.com/socketry/nio4r/pull/146) Use non-blocking select when a timeout of 0 is given. ([@tarcieri])
* [#147](https://github.com/socketry/nio4r/pull/147) Update to libev 4.24. ([@tarcieri])
* [#148](https://github.com/socketry/nio4r/pull/148) Switch to the libev 4 API internally. ([@tarcieri])

## 2.0.0 (2016-12-28)

* [#53](https://github.com/socketry/nio4r/pull/53) Limit lock scope to prevent recursive locking. ([@johnnyt])
* [#95](https://github.com/socketry/nio4r/pull/95) NIO::ByteBuffer Google Summer of Code project. ([@UpeksheJay], [@tarcieri])
* [#111](https://github.com/socketry/nio4r/pull/111) NIO::Selector#backend introspection support. ([@tarcieri])
* [#112](https://github.com/socketry/nio4r/pull/112) Upgrade to libev 4.23. ([@tarcieri])
* [#119](https://github.com/socketry/nio4r/pull/119) Disambiguate wakeup vs timeout (fixes #63, #66). ([@tarcieri])
* [#124](https://github.com/socketry/nio4r/pull/124) Monitor interests API improvements. ([@tarcieri])
* Drop Ruby 2.0 and 2.1 support, require Ruby 2.2.2+. ([@tarcieri])

## 1.2.1 (2016-01-31)

* Fix bug in the JRuby backend which causes indefinite blocking when small
  timeout values are passed to the selector

## 1.2.0 (2015-12-22)

* Add NIO::Monitor#interests= API for changing interests.
  Contributed by Upekshe Jayasekera as a Google Summer of Code project.
* Update to libev 4.22

## 1.1.1 (2015-07-17)

* Update to libev 4.20
* Fall back to io.h if unistd.h is not found
* RSpec updates
* RuboCop

## 1.1.0 (2015-01-10)

* Update to libev 4.19
* Do not call ev_io_stop on monitors if the loop is already closed

## 1.0.1 (2014-09-01)

* Fix C compiler warnings
* Eliminate Ruby warnings about @lock_holder
* Windows improvements
* Better support for Ruby 2.1
* Automatically require 'set'
* Update to RSpec 3

## 1.0.0 (2014-01-14)

* Have Selector#register obtain the actual IO from a Monitor object
  because Monitor#initialize might convert it.
* Drop 1.8 support

## 0.5.0 (2013-08-06)

* Fix segv when attempting to register to a closed selector
* Fix Windows support on Ruby 2.0.0
* Upgrade to libev 4.15

## 0.4.6 (2013-05-27)

* Fix for JRuby on Windows

## 0.4.5

* Fix botched gem release

## 0.4.4

* Fix return values for Selector_synchronize and Selector_unlock

## 0.4.3

* REALLY have thread synchronization when closing selectors ;)

## 0.4.2

* Attempt to work around packaging problems with bundler-api o_O

## 0.4.1

* Thread synchronization when closing selectors

## 0.4.0

* OpenSSL::SSL::SSLSocket support

## 0.3.3

* NIO::Selector#select_each removed
* Remove event buffer
* Patch GIL unlock directly into libev
* Re-release since 0.3.2 was botched :(

## 0.3.1

* Prevent CancelledKeyExceptions on JRuby

## 0.3.0

* NIO::Selector#select now takes a block and behaves like select_each
* NIO::Selector#select_each is now deprecated and will be removed
* Closing monitors detaches them from their selector
* Java extension for JRuby
* Upgrade to libev 4.11
* Bugfixes for zero/negative select timeouts
* Handle OP_CONNECT properly on JRuby

## 0.2.2

* Raise IOError if asked to wake up a closed selector

## 0.2.1

* Implement wakeup mechanism using raw pipes instead of ev_async, since
  ev_async likes to cause segvs when used across threads (despite claims
  in the documentation to the contrary)

## 0.2.0

* NIO::Monitor#readiness API to query readiness, along with #readable?
  and #writable? helper methods
* NIO::Selector#select_each API which avoids memory allocations if possible
* Bugfixes for the JRuby implementation

## 0.1.0

* Initial release. Merry Christmas!

[@tarcieri]: https://github.com/tarcieri
[@johnnyt]: https://github.com/johnnyt
[@UpeksheJay]: https://github.com/UpeksheJay
[@junaruga]: https://github.com/junaruga
[@unak]: https://github.com/unak
[@MSP-Greg]: https://github.com/MSP-Greg
[@larskanis]: https://github.com/larskanis
[@HoneyryderChuck]: https://github.com/HoneyryderChuck
[@tompng]: https://github.com/tompng
[@ioquatix]: https://github.com/ioquatix
[@eregon]: https://github.com/eregon
[@olleolleolle]: https://github.com/olleolleolle
[@boazsegev]: https://github.com/boazsegev
[@headius]: https://github.com/headius
[@jasl]: https://github.com/jasl
[@jcmfernandes]: https://github.com/jcmfernandes