cool.io-1.8.1/0000755000004100000410000000000014632135713013120 5ustar www-datawww-datacool.io-1.8.1/.gitignore0000644000004100000410000000031514632135713015107 0ustar www-datawww-data## MAC OS .DS_Store ## TEXTMATE *.tmproj tmtags ## EMACS *~ \#* .\#* ## VIM *.swp ## PROJECT::GENERAL coverage rdoc pkg tmp ## RUBINIUS *.rbc ## PROJECT::SPECIFIC conftest.dSYM *.lock .ruby-version cool.io-1.8.1/cool.io.gemspec0000644000004100000410000000213214632135713016025 0ustar www-datawww-data# -*- encoding: utf-8 -*- $:.push File.expand_path("../lib", __FILE__) require "cool.io/version" Gem::Specification.new do |s| s.name = "cool.io" s.version = Coolio::VERSION s.authors = ["Tony Arcieri", "Masahiro Nakagawa"] s.email = ["tony.arcieri@gmail.com", "repeatedly@gmail.com"] s.homepage = "https://github.com/socketry/cool.io" s.summary = "A cool framework for doing high performance I/O in Ruby" s.description = "Cool.io provides a high performance event framework for Ruby which uses the libev C library" s.extensions = ["ext/cool.io/extconf.rb", "ext/iobuffer/extconf.rb"] s.licenses = ["MIT"] s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) } s.require_paths = ["lib"] s.add_development_dependency "rake-compiler", "~> 1.0" s.add_development_dependency "rake-compiler-dock", "~> 1.0" s.add_development_dependency "rspec", ">= 2.13.0" s.add_development_dependency "rdoc", ">= 3.6.0" end cool.io-1.8.1/libev_win_select.diff0000644000004100000410000001022514632135713017267 0ustar www-datawww-datadiff --git a/ext/libev/ev.c b/ext/libev/ev.c index dae87f1..d15f6bd 100644 --- a/ext/libev/ev.c +++ b/ext/libev/ev.c @@ -207,6 +207,7 @@ #else # include # define WIN32_LEAN_AND_MEAN +# define FD_SETSIZE 1024 # include # include # ifndef EV_SELECT_IS_WINSOCKET diff --git a/ext/libev/ev_select.c b/ext/libev/ev_select.c index f38d6ca..7050778 100644 --- a/ext/libev/ev_select.c +++ b/ext/libev/ev_select.c @@ -67,6 +67,54 @@ #include +#ifdef _WIN32 +/* +########## COOLIO PATCHERY HO! ########## + +Ruby undefs FD_* utilities for own implementation. +It converts fd argument into socket handle internally on Windows, +so libev should not use Ruby's FD_* utilities. + +Following FD_* utilities come from MinGW. +RubyInstaller is built by MinGW so this should work. 
+*/ +int PASCAL __WSAFDIsSet(SOCKET,fd_set*); +#define EV_WIN_FD_CLR(fd,set) do { u_int __i;\ +for (__i = 0; __i < ((fd_set *)(set))->fd_count ; __i++) {\ + if (((fd_set *)(set))->fd_array[__i] == (fd)) {\ + while (__i < ((fd_set *)(set))->fd_count-1) {\ + ((fd_set*)(set))->fd_array[__i] = ((fd_set*)(set))->fd_array[__i+1];\ + __i++;\ + }\ + ((fd_set*)(set))->fd_count--;\ + break;\ + }\ +}\ +} while (0) +#define EV_WIN_FD_SET(fd, set) do { u_int __i;\ +for (__i = 0; __i < ((fd_set *)(set))->fd_count ; __i++) {\ + if (((fd_set *)(set))->fd_array[__i] == (fd)) {\ + break;\ + }\ +}\ +if (__i == ((fd_set *)(set))->fd_count) {\ + if (((fd_set *)(set))->fd_count < FD_SETSIZE) {\ + ((fd_set *)(set))->fd_array[__i] = (fd);\ + ((fd_set *)(set))->fd_count++;\ + }\ +}\ +} while(0) +#define EV_WIN_FD_ZERO(set) (((fd_set *)(set))->fd_count=0) +#define EV_WIN_FD_ISSET(fd, set) __WSAFDIsSet((SOCKET)(fd), (fd_set *)(set)) +#define EV_WIN_FD_COUNT(set) (((fd_set *)(set))->fd_count) +/* ######################################## */ +#else +#define EV_WIN_FD_CLR FD_CLR +#define EV_WIN_FD_SET FD_SET +#define EV_WIN_FD_ZERO FD_ZERO +#define EV_WIN_FD_ISSET FD_ISSET +#endif + static void select_modify (EV_P_ int fd, int oev, int nev) { @@ -91,17 +139,17 @@ select_modify (EV_P_ int fd, int oev, int nev) if ((oev ^ nev) & EV_READ) #endif if (nev & EV_READ) - FD_SET (handle, (fd_set *)vec_ri); + EV_WIN_FD_SET (handle, (fd_set *)vec_ri); else - FD_CLR (handle, (fd_set *)vec_ri); + EV_WIN_FD_CLR (handle, (fd_set *)vec_ri); #if EV_SELECT_IS_WINSOCKET if ((oev ^ nev) & EV_WRITE) #endif if (nev & EV_WRITE) - FD_SET (handle, (fd_set *)vec_wi); + EV_WIN_FD_SET (handle, (fd_set *)vec_wi); else - FD_CLR (handle, (fd_set *)vec_wi); + EV_WIN_FD_CLR (handle, (fd_set *)vec_wi); #else @@ -197,8 +245,8 @@ select_poll (EV_P_ ev_tstamp timeout) { if (timeout) { - unsigned long ms = timeout * 1e3; - Sleep (ms ? ms : 1); + unsigned long ms = (unsigned long)(timeout * 1e3); + SleepEx (ms ? 
ms : 1, TRUE);
     }
 
   return;
@@ -230,10 +278,10 @@ select_poll (EV_P_ ev_tstamp timeout)
       int handle = fd;
       #endif
 
-      if (FD_ISSET (handle, (fd_set *)vec_ro)) events |= EV_READ;
-      if (FD_ISSET (handle, (fd_set *)vec_wo)) events |= EV_WRITE;
+      if (EV_WIN_FD_ISSET (handle, (fd_set *)vec_ro)) events |= EV_READ;
+      if (EV_WIN_FD_ISSET (handle, (fd_set *)vec_wo)) events |= EV_WRITE;
 #ifdef _WIN32
-      if (FD_ISSET (handle, (fd_set *)vec_eo)) events |= EV_WRITE;
+      if (EV_WIN_FD_ISSET (handle, (fd_set *)vec_eo)) events |= EV_WRITE;
 #endif
 
       if (expect_true (events))
@@ -279,9 +327,9 @@ select_init (EV_P_ int flags)
   backend_poll = select_poll;
 
 #if EV_SELECT_USE_FD_SET
-  vec_ri = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_ri);
+  vec_ri = ev_malloc (sizeof (fd_set)); EV_WIN_FD_ZERO ((fd_set *)vec_ri);
   vec_ro = ev_malloc (sizeof (fd_set));
-  vec_wi = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_wi);
+  vec_wi = ev_malloc (sizeof (fd_set)); EV_WIN_FD_ZERO ((fd_set *)vec_wi);
   vec_wo = ev_malloc (sizeof (fd_set));
 
 #ifdef _WIN32
   vec_eo = ev_malloc (sizeof (fd_set));
cool.io-1.8.1/CHANGES.md0000644000004100000410000001767714632135713014524 0ustar  www-datawww-data1.7.1
-----
* Set fallback local loopback address by default for Windows environment

1.7.0
-----
* Fix extension build failure for Ruby 3

1.6.1
-----
* Fix warnings for recent compilers

1.6.0
-----
* Add Ruby 2.6/2.7 to the Windows gem
* Drop Ruby 2.1/2.2/2.3 from the Windows gem

1.5.4
-----
* Add Ruby 2.5 to the Windows gem

1.5.3
-----
* Fix IOWatcher's invalid rb_funcall that causes ArgumentError with Ruby 2.5 and clang

1.5.2
-----
* Fix TimerWatcher's invalid rb_funcall that causes ArgumentError with Ruby 2.5 and clang

1.5.1
-----
* Don't raise an exception when peername fails

1.5.0
-----
* Update libev to 4.24

1.4.6
-----
* Add Ruby 2.4.0 to the Windows binary gem

1.4.5
-----
* Increase FD_SETSIZE to 1024 on Windows

1.4.4
-----
* Suppress lots of warnings

1.4.3
-----
* Use accept instead of accept_nonblock on Windows to avoid the thundering herd problem
* Fix compilation error on Solaris and Ruby 2.3.0

1.4.2
-----
* Add unexpected object info to attach exception message

1.4.1
-----
* Use SleepEx instead of Sleep as a better fix for the process hang problem on Windows
* Use rake-compiler-dock for cross compilation

1.4.0
-----
* Update libev to 4.20
* Sleep in timeout instead of select on Windows

1.3.1
-----
* Fix several bugs to improve JRuby support
* Fix a deadlock bug on Windows
* Use RSpec 3

1.3.0
-----
* Block evaluation doesn't change self, for consistency with Ruby blocks
* Remove EventMachine emulation module
* Remove HttpClient
* DSL syntax is no longer available by default; require 'cool.io/dsl' in user code to enable it
* Update libev to 4.19

1.2.4
-----
* Fix a bug where #close on an unconnected Socket didn't detach all watchers (#33)
* Remove 1.8 support code
* Use the standard library instead of our own hosts list (#34)

1.2.3
-----
* Fix a CPU consumption issue on Windows.

1.2.2
-----
* Add timeout option to Loop#run and Loop#run_once.
  Defaults to nil
* Support Ruby 2.2.0

1.2.1
-----
* Release the GIL when libev polls (#24)
* Add Listener#listen method to change backlog size

1.2.0
-----
* Support Windows environment via cross compilation
* Include iobuffer library
* Update to libev 4.15
* Remove Ruby 1.8 support

1.1.0
-----
* Switch from Jeweler to Bundler for the gem boilerplate
* Fix firing of Coolio::HttpClient#on_request_complete (#15)
* Fix failure to resolve Init_cool symbol on win32 mingw (#14)
* Fix closing /etc/hosts in the DNS resolver (#12)
* Refactor StatWatcher to pass previous and current path state a la Node.js
* spec:valgrind Rake task to run specs under valgrind
* Use rake-compiler to build cool.io
* Upgrade to libev 4.04

1.0.0
-----
* Fancy new DSL

0.9.0
-----
* Rename the project to cool.io
* Bump the version all the way to 0.9! Hell yeah! 1.0 soon!
* Rename the main module from Rev to Coolio, with deprecation warnings for Rev
* Use Jeweler to manage the gem
* Update to RSpec 2.0
* Update to libev 4.01
* Initial Rubinius support

0.3.2
-----
* Perform a blocking system call if we're the only thread running (1.8 only)
* Run in non-blocking mode if we're the only thread in the process (1.8 only)
* Make Rev::Loop#run_nonblock signal-safe
* Fix spurious firing of Rev::AsyncWatchers

0.3.1
-----
* Configurable intervals for Rev::StatWatcher
* Fix broken version number :(
* Removed warning about spuriously readable sockets from Rev::Listener
* Rev::Listener ignores ECONNABORTED from accept_nonblock
* Document rationale for EAGAIN/ECONNABORTED handling in Rev::Listener

0.3.0
-----
* Add Rev::StatWatcher to monitor filesystem changes
* Add Rev::Listener#fileno for accessing the underlying file descriptor
* Support for creating Rev::Listeners from existing TCPServers/UNIXServers
* Upgrade to libev 3.8
* Simplified code loading
* Pull in iobuffer gem and change outstanding uses of Rev::Buffer to IO::Buffer
* Fix memory leaks resulting from strange semantics of Ruby's xrealloc
* Rev::UNIXServer: use path instead of the first argument
* Rev::Server-based classes can build off ::*Server objects

0.2.4
-----
* Ugh, botched my first release from the git repo.  Oh well.  Try, try again.

0.2.3
-----
* Initial Windows support
* Initial Ruby 1.8.7 and 1.9.1 support
* Upgrade to libev 3.52
* Add checks for sys/resource.h and don't allow getting/setting maxfds if it
  isn't present

0.2.2
-----
* Correct a pointer arithmetic error in the buffering code that could result
  in data corruption.
* Upgrade to libev 3.41
* Relax HTTP/1.1 response parser to allow the "reason" portion of the response
  header to be omitted

0.2.1
-----
* Upgrade to libev 3.31
* Rev::Loop#run_once and Rev::Loop#run_nonblock now return the number of events
  received when they were running
* Remove inheritance relationship between Rev::IO and Rev::IOWatcher
* Loosen HTTP/1.1 response parser to accept a common malformation in HTTP/1.1
  chunk headers
* Add underscore prefix to instance variables to avoid conflicts in subclasses
* Remove Rev::SSLServer until it can be made more useful

0.2.0
-----
* Initial Ruby 1.8.6 support
* Omit Rev::LIBEV_VERSION constant
* Catch Errno::ECONNRESET when writing to sockets
* SSL support via Rev::SSL, with a small C extension subclassing Ruby's
  OpenSSL::SSL::SSLSocket allowing for non-blocking SSL handshakes
* Initial Rev::Utils implementation with #ncpus and methods to query and
  change the maximum number of file descriptors for the current process.
* Initial Rev::AsyncWatcher implementation for cross-thread signaling * Handle unspecified Content-Length when encoding is identity in HttpClient * Fix bug in HttpClient processing zero Content-Length * Get rid of method_missing stuff in Rev::HttpClient * Have Rev::HttpClient close the connection on error * Allow Rev::TCPSocket#on_connect to be private when accepting connections from a Rev::TCPServer 0.1.4 ----- * Calibrate Rev::TimerWatchers against ev_time() and ev_now() when the watcher is attached to the loop to ensure that the timeout interval is correct. * Add check to ensure that a Rev::Loop cannot be run from within a callback * Store Rev::Loop.default in a Thread-specific instance variable * Upgrade libev to 0.3.0 * Rename BufferedIO to IO * Fixed bug in BufferedIO#write_output_buffer causing it to spin endlessly on an empty buffer. * Added has_active_watchers? to Rev::Loop to check for active watchers 0.1.3 ----- * Fixed bug in Rev::Buffer read_from and write_to: now rb_sys_fail on failed reads/writes. * Change Rev::Buffer memory pools to purge on a periodic interval, rather than whenever the GC marks the object. * Fix bug in tracking the active watcher count. Factor shared watcher behavior from rev_watcher.h to rev_watcher.c. 0.1.2 ----- * Commit initial specs * Improve RDoc for the library * Eliminate "zero copy" writes as they bypass the event loop * Added Rev::Buffer C extension to provide high speed buffered writes * Implement Rev::TCPSocket#peeraddr to improve compatibility with Ruby sockets * Added Rev::Listener.close for clean shutdown of a listener * Rev::Loop.default used to call ev_loop_default() (in C). However, this registers signal handlers which conflict with Ruby's own. Now the behavior has been changed to return a thread-local singleton of Rev::Loop. * Creating a new Rev::TCPListener will disable reverse lookups in BasicSocket * Made backlog for Rev::TCPListener user-definable * Rev::TCPSocket now implements an on_resolve_failed callback for failed DNS resolution. By default it's aliased to on_connect_failed. * Changed event_callbacks to use instance_exec rather than passing the watcher object as an argument. Documented use of defining an event callback as a block * Subsecond precision for Rev::TimerWatchers 0.1.1 ----- * Added Rev::HttpClient, an asynchronous HTTP/1.1 client written on top of the Rev::TCPSocket class * Imported HTTP response parser from the RFuzz project * Added exception handling for Errno::ECONNRESET and Errno::EAGAIN * Fixed bugs in buffered writer which resulted in exceptions if all data couldn't be written with a nonblocking write. 
0.1.0 ----- * Initial public releasecool.io-1.8.1/.github/0000755000004100000410000000000014632135713014460 5ustar www-datawww-datacool.io-1.8.1/.github/workflows/0000755000004100000410000000000014632135713016515 5ustar www-datawww-datacool.io-1.8.1/.github/workflows/test.yaml0000644000004100000410000000200014632135713020350 0ustar www-datawww-dataname: Test on: [push, pull_request] permissions: contents: read env: CONSOLE_OUTPUT: XTerm jobs: test: name: ${{matrix.ruby}} on ${{matrix.os}} runs-on: ${{matrix.os}}-latest continue-on-error: ${{matrix.experimental}} strategy: fail-fast: false matrix: os: - ubuntu - macos - windows ruby: - "3.0" - "3.1" - "3.2" - "head" experimental: [false] # include: # - os: ubuntu # ruby: truffleruby # experimental: true # - os: ubuntu # ruby: jruby # experimental: true # - os: ubuntu # ruby: head # experimental: true steps: - uses: actions/checkout@v3 - uses: ruby/setup-ruby@v1 with: ruby-version: ${{matrix.ruby}} bundler-cache: true - name: Run tests timeout-minutes: 2 run: bundle exec rake cool.io-1.8.1/lib/0000755000004100000410000000000014632135713013666 5ustar www-datawww-datacool.io-1.8.1/lib/.gitignore0000644000004100000410000000001614632135713015653 0ustar www-datawww-data*.so *.bundle cool.io-1.8.1/lib/cool.io.rb0000644000004100000410000000124514632135713015557 0ustar www-datawww-data#-- # Copyright (C)2011 Tony Arcieri # You can redistribute this under the terms of the Ruby license # See file LICENSE for details #++ require "cool.io/version" require "cool.io/custom_require" cool_require "iobuffer_ext" cool_require "cool.io_ext" require "cool.io/loop" require "cool.io/meta" require "cool.io/io" require "cool.io/iowatcher" require "cool.io/timer_watcher" require "cool.io/async_watcher" require "cool.io/listener" require "cool.io/dns_resolver" require "cool.io/socket" require "cool.io/server" module Coolio def self.inspect "Cool.io" end end module Cool # Allow Coolio module to be referenced as Cool.io def self.io Coolio end end cool.io-1.8.1/lib/coolio.rb0000644000004100000410000000011314632135713015472 0ustar www-datawww-data# For those people who don't like the cool.io styling... require 'cool.io' cool.io-1.8.1/lib/cool.io/0000755000004100000410000000000014632135713015230 5ustar www-datawww-datacool.io-1.8.1/lib/cool.io/dsl.rb0000644000004100000410000000753614632135713016352 0ustar www-datawww-data#-- # Copyright (C)2010 Tony Arcieri # You can redistribute this under the terms of the Ruby license # See file LICENSE for details #++ module Coolio # A module we stash all the connections defined by the DSL under module Connections; end # A DSL for defining Cool.io connection types and servers module DSL # Define all methods on the metaclass module_function # Run the default Cool.io event loop def run Cool.io::Loop.default.run end # Connect to the given host and port using the given connection class def connect(host, port, connection_name = nil, *initializer_args, &block) if block_given? initializer_args.unshift connection_name if connection_name klass = Class.new Cool.io::TCPSocket connection_builder = ConnectionBuilder.new klass connection_builder.instance_eval(&block) else raise ArgumentError, "no connection name or block given" unless connection_name klass = self[connection_name] end client = klass.connect host, port, *initializer_args client.attach Cool.io::Loop.default client end # Create a new Cool.io::TCPServer def server(host, port, connection_name = nil, *initializer_args, &block) if block_given? 
initializer_args.unshift connection_name if connection_name

        klass = Class.new Cool.io::TCPSocket
        connection_builder = ConnectionBuilder.new klass
        connection_builder.instance_eval(&block)
      else
        raise ArgumentError, "no connection name or block given" unless connection_name
        klass = self[connection_name]
      end

      server = Cool.io::TCPServer.new host, port, klass, *initializer_args
      server.attach Cool.io::Loop.default
      server
    end

    # Create a new Cool.io::TCPSocket class
    def connection(name, &block)
      # Camelize class name
      class_name = name.to_s.split('_').map { |s| s.capitalize }.join

      connection = Class.new Cool.io::TCPSocket
      connection_builder = ConnectionBuilder.new connection
      connection_builder.instance_eval(&block)

      Coolio::Connections.const_set class_name, connection
    end

    # Look up a connection class by its name
    def [](connection_name)
      class_name = connection_name.to_s.split('_').map { |s| s.capitalize }.join

      begin
        Coolio::Connections.const_get class_name
      rescue NameError
        raise NameError, "No connection type registered for #{connection_name.inspect}"
      end
    end

    # Builder for Cool.io::TCPSocket classes
    class ConnectionBuilder
      def initialize(klass)
        @klass = klass
      end

      # Declare an initialize function
      def initializer(&action)
        @klass.send :define_method, :initialize, &action
      end

      # Declare the on_connect callback
      def on_connect(&action)
        @klass.send :define_method, :on_connect, &action
      end

      # Declare a callback fired if we failed to connect
      def on_connect_failed(&action)
        @klass.send :define_method, :on_connect_failed, &action
      end

      # Declare a callback fired if DNS resolution failed
      def on_resolve_failed(&action)
        @klass.send :define_method, :on_resolve_failed, &action
      end

      # Declare the on_close callback
      def on_close(&action)
        @klass.send :define_method, :on_close, &action
      end

      # Declare the on_read callback
      def on_read(&action)
        @klass.send :define_method, :on_read, &action
      end

      # Declare the on_write_complete callback
      def on_write_complete(&action)
        @klass.send :define_method, :on_write_complete, &action
      end
    end
  end
end

# The Cool module containing all our coolness
module Cool
  module Coolness
    def cool
      Cool::IOThunk
    end
  end

  module IOThunk
    def self.io
      Coolio::DSL
    end
  end
end

extend Cool::Coolness
cool.io-1.8.1/lib/cool.io/server.rb0000644000004100000410000000541014632135713017063 0ustar  www-datawww-data#--
# Copyright (C)2007-10 Tony Arcieri
# You can redistribute this under the terms of the Ruby license
# See file LICENSE for details
#++

module Coolio
  class Server < Listener
    # Servers listen for incoming connections and create new connection objects
    # whenever incoming connections are received.  The default class for new
    # connections is a Socket, but any subclass of Coolio::IO is acceptable.
    def initialize(listen_socket, klass = Socket, *args, &block)
      # Ensure the provided class responds to attach
      unless klass.allocate.is_a? IO
        raise ArgumentError, "can't convert #{klass} to Coolio::IO"
      end

      # Verify the arity of the provided arguments
      arity = klass.instance_method(:initialize).arity
      expected = arity >= 0 ?
arity : -(arity + 1) if (arity >= 0 and args.size + 1 != expected) or (arity < 0 and args.size + 1 < expected) raise ArgumentError, "wrong number of arguments for #{klass}#initialize (#{args.size+1} for #{expected})" end @klass, @args, @block = klass, args, block super(listen_socket) end # Returns an integer representing the underlying numeric file descriptor def fileno @listen_socket.fileno end ######### protected ######### def on_connection(socket) connection = @klass.new(socket, *@args).attach(evloop) connection.__send__(:on_connect) @block.call(connection) if @block end end # TCP server class. Listens on the specified host and port and creates # new connection objects of the given class. This is the most common server class. # Note that the new connection objects will be bound by default to the same event loop that the server is attached to. # Optionally, it can also take any existing core TCPServer object as # +host+ and create a Coolio::TCPServer out of it. class TCPServer < Server def initialize(host, port = nil, klass = TCPSocket, *args, &block) listen_socket = if ::TCPServer === host host else raise ArgumentError, "port must be an integer" if nil == port ::TCPServer.new(host, port) end listen_socket.instance_eval { listen(DEFAULT_BACKLOG) } # Change listen backlog to 1024 super(listen_socket, klass, *args, &block) end end # UNIX server class. Listens on the specified UNIX domain socket and # creates new connection objects of the given class. # Optionally, it can also take any existing core UNIXServer object as # +path+ and create a Coolio::UNIXServer out of it. class UNIXServer < Server def initialize(path, klass = UNIXSocket, *args, &block) s = ::UNIXServer === path ? path : ::UNIXServer.new(path) s.instance_eval { listen(DEFAULT_BACKLOG) } super(s, klass, *args, &block) end end end cool.io-1.8.1/lib/cool.io/listener.rb0000644000004100000410000000676514632135713017420 0ustar www-datawww-data#-- # Copyright (C)2007-10 Tony Arcieri # You can redistribute this under the terms of the Ruby license # See file LICENSE for details #++ require 'socket' module Coolio # Listeners wait for incoming connections. When a listener receives a # connection it fires the on_connection event with the newly accepted # socket as a parameter. class Listener < IOWatcher def initialize(listen_socket) @listen_socket = listen_socket super(@listen_socket) end # Returns an integer representing the underlying numeric file descriptor def fileno @listen_socket.fileno end def listen(backlog) @listen_socket.listen(backlog) end # Close the listener def close detach if attached? @listen_socket.close end # Called whenever the server receives a new connection def on_connection(socket); end event_callback :on_connection ######### protected ######### # Coolio callback for handling new connections unless RUBY_PLATFORM =~ /mingw|mswin/ def on_readable begin on_connection @listen_socket.accept_nonblock rescue Errno::EAGAIN, Errno::ECONNABORTED # EAGAIN can be triggered here if the socket is shared between # multiple processes and a thundering herd is woken up to accept # one connection, only one process will get the connection and # the others will be awoken. # ECONNABORTED is documented in accept() manpages but modern TCP # stacks with syncookies and/or accept()-filtering for DoS # protection do not see it. In any case this error is harmless # and we should instead spend our time with clients that follow # through on connection attempts. 
        end
      end
    else
      def on_readable
        begin
          # In Windows, accept_nonblock() with multiple processes
          # causes the thundering herd problem.
          # To avoid this, we need to use accept().
          on_connection @listen_socket.accept
        rescue Errno::EAGAIN, Errno::ECONNABORTED
        end
      end
    end
  end

  DEFAULT_BACKLOG = 1024

  class TCPListener < Listener
    # Create a new Coolio::TCPListener on the specified address and port.
    # Accepts the following options:
    #
    #   :backlog - Max size of the pending connection queue (default 1024)
    #   :reverse_lookup - Retain BasicSocket's reverse DNS functionality (default false)
    #
    # If the specified address is a TCPServer object, it will ignore
    # the port and :backlog option and create a new Coolio::TCPListener out
    # of the existing TCPServer object.
    def initialize(addr, port = nil, options = {})
      BasicSocket.do_not_reverse_lookup = true unless options[:reverse_lookup]
      options[:backlog] ||= DEFAULT_BACKLOG

      listen_socket = if ::TCPServer === addr
        addr
      else
        raise ArgumentError, "port must be an integer" if nil == port
        ::TCPServer.new(addr, port)
      end
      listen_socket.instance_eval { listen(options[:backlog]) }
      super(listen_socket)
    end
  end

  class UNIXListener < Listener
    # Create a new Coolio::UNIXListener
    #
    # Accepts the same arguments as UNIXServer.new
    # Optionally, it can also take any existing UNIXServer object
    # and create a Coolio::UNIXListener out of it.
    def initialize(*args)
      s = ::UNIXServer === args.first ? args.first : ::UNIXServer.new(*args)
      s.instance_eval { listen(DEFAULT_BACKLOG) }
      super(s)
    end
  end
end
cool.io-1.8.1/lib/cool.io/custom_require.rb0000644000004100000410000000024114632135713020620 0ustar  www-datawww-datadef cool_require(gem)
  begin
    m = /(\d+\.\d+)/.match(RUBY_VERSION)
    ver = m[1]
    require "#{ver}/#{gem}.so"
  rescue LoadError
    require gem
  end
end
cool.io-1.8.1/lib/cool.io/dns_resolver.rb0000644000004100000410000001574514632135713020266 0ustar  www-datawww-data#--
# Copyright (C)2007-10 Tony Arcieri
# You can redistribute this under the terms of the Ruby license
# See file LICENSE for details
#
# Gimpy hacka asynchronous DNS resolver
#
# Word to the wise: I don't know what I'm doing here.  This was cobbled together
# as best I could with extremely limited knowledge of the DNS format.  There's
# obviously a ton of stuff it doesn't support (like IPv6 and TCP).
#
# If you do know what you're doing with DNS, feel free to improve this!
# A good starting point may be this EventMachine Net::DNS-based asynchronous
# resolver:
#
#   http://gist.github.com/663299
#
#++

require 'resolv'

module Coolio
  # A non-blocking DNS resolver.  It provides interfaces for querying both
  # /etc/hosts and nameservers listed in /etc/resolv.conf, or nameservers of
  # your choosing.
  #
  # Presently the client only supports UDP requests against your nameservers
  # and cannot resolve anything with records larger than 512 bytes.  Also,
  # IPv6 is not presently supported.
  #
  # DNSResolver objects are one-shot.  Once they resolve a domain name they
  # automatically detach themselves from the event loop and cannot be used
  # again.
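  #
  # A minimal usage sketch of that one-shot lifecycle (the hostname here is
  # illustrative; the block form of on_success/on_failure comes from the
  # event_callback helpers in Coolio::Meta):
  #
  #   resolver = Coolio::DNSResolver.new("example.com")
  #   resolver.on_success { |addr| puts "resolved to #{addr}" }
  #   resolver.on_failure { puts "could not resolve" }
  #   resolver.attach(Coolio::Loop.default)
  #   Coolio::Loop.default.run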
class DNSResolver < IOWatcher
    #--
    DNS_PORT = 53
    DATAGRAM_SIZE = 512
    TIMEOUT = 3 # Retry timeout for each datagram sent
    RETRIES = 4 # Number of retries to attempt
    # so currently the total is 12s before it will err due to timeouts;
    # the same applies if it errs due to inability to reach the DNS server [Errno::EHOSTUNREACH]

    # Query /etc/hosts (or the specified hostfile) for the given host
    def self.hosts(host, hostfile = Resolv::Hosts::DefaultFileName)
      hosts = {}
      File.open(hostfile) do |f|
        f.each_line do |host_entry|
          entries = host_entry.gsub(/#.*$/, '').gsub(/\s+/, ' ').split(' ')
          addr = entries.shift
          entries.each { |e| hosts[e] ||= addr }
        end
      end

      unless hosts.key?("localhost")
        # On Windows, the hosts file may not have an entry by default,
        # and preferred IPv4/IPv6 behavior may be changed by a registry key [1],
        # so "localhost" should be resolved by getaddrinfo.
        # (first[3] means the preferred resolved IP address, ::1 or 127.0.0.1)
        # [1] https://docs.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-ipv6-in-windows
        require "socket"
        hosts["localhost"] = ::Socket.getaddrinfo("localhost", nil).first[3]
      end

      hosts[host]
    end

    # Create a new Coolio::Watcher-descended object to resolve the
    # given hostname.  If you so desire you can also specify a
    # list of nameservers to query.  By default the resolver will
    # use nameservers listed in /etc/resolv.conf
    def initialize(hostname, *nameservers)
      if nameservers.empty?
        nameservers = Resolv::DNS::Config.default_config_hash[:nameserver]
        raise RuntimeError, "no nameservers found" if nameservers.empty? # TODO just call resolve_failed, not raise [also handle Errno::ENOENT]
      end

      @nameservers = nameservers
      @question = request_question hostname

      @socket = UDPSocket.new
      @timer = Timeout.new(self)

      super(@socket)
    end

    # Attach the DNSResolver to the given event loop
    def attach(evloop)
      send_request
      @timer.attach(evloop)
      super
    end

    # Detach the DNSResolver from the given event loop
    def detach
      @timer.detach if @timer.attached?
      super
    end

    # Called when the name has successfully resolved to an address
    def on_success(address); end
    event_callback :on_success

    # Called when we receive a response indicating the name didn't resolve
    def on_failure; end
    event_callback :on_failure

    # Called if we don't receive a response, defaults to calling on_failure
    def on_timeout
      on_failure
    end

    #########
    protected
    #########

    # Send a request to the DNS server
    def send_request
      nameserver = @nameservers.shift
      @nameservers << nameserver # rotate them
      begin
        @socket.send request_message, 0, @nameservers.first, DNS_PORT
      rescue Errno::EHOSTUNREACH # TODO figure out why it has to be wrapped here, when the other wrapper should be wrapping this one!
      end
    end

    # Called by the subclass when the DNS response is available
    def on_readable
      datagram = nil
      begin
        datagram = @socket.recvfrom_nonblock(DATAGRAM_SIZE).first
      rescue Errno::ECONNREFUSED
      end

      address = response_address datagram rescue nil
      address ? on_success(address) : on_failure
      detach
    end

    def request_question(hostname)
      raise ArgumentError, "hostname cannot be nil" if hostname.nil?
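      # DNS QNAME wire format (RFC 1035 section 3.1): each dot-separated label
      # is emitted as a single length byte followed by the label's bytes, and
      # the whole name is terminated by a zero byte.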
      # Query name
      message = hostname.split('.').map { |s| [s.size].pack('C') << s }.join + "\0"

      # Host address query
      qtype = 1

      # Internet query
      qclass = 1

      message << [qtype, qclass].pack('nn')
    end

    def request_message
      # Standard query header
      message = [2, 1, 0].pack('nCC')

      # One entry
      qdcount = 1

      # No answer, authority, or additional records
      ancount = nscount = arcount = 0

      message << [qdcount, ancount, nscount, arcount].pack('nnnn')
      message << @question
    end

    def response_address(message)
      # Confirm the ID field
      id = message[0..1].unpack('n').first.to_i
      return unless id == 2

      # Check the QR value and confirm this message is a response
      qr = message[2..2].unpack('B1').first.to_i
      return unless qr == 1

      # Check the RCODE (lower nibble) and ensure there wasn't an error
      rcode = message[3..3].unpack('B8').first[4..7].to_i(2)
      return unless rcode == 0

      # Extract the question and answer counts
      qdcount, _ancount = message[4..7].unpack('nn').map { |n| n.to_i }

      # We only asked one question
      return unless qdcount == 1
      message.slice!(0, 12)

      # Make sure it's the same question
      return unless message[0..(@question.size-1)] == @question
      message.slice!(0, @question.size)

      # Extract the RDLENGTH
      while not message.empty?
        type = message[2..3].unpack('n').first.to_i
        rdlength = message[10..11].unpack('n').first.to_i
        rdata = message[12..(12 + rdlength - 1)]
        message.slice!(0, 12 + rdlength)

        # Only IPv4 supported
        next unless rdlength == 4

        # If we got an Internet address back, return it
        return rdata.unpack('CCCC').join('.') if type == 1
      end

      nil
    end

    class Timeout < TimerWatcher
      def initialize(resolver)
        @resolver = resolver
        @attempts = 0
        super(TIMEOUT, true)
      end

      def on_timer
        @attempts += 1
        if @attempts <= RETRIES
          begin
            return @resolver.__send__(:send_request)
          rescue Errno::EHOSTUNREACH # if the DNS is toast try again after the timeout occurs again
            return nil
          end
        end
        @resolver.__send__(:on_timeout)
        @resolver.detach
      end
    end
  end
end
cool.io-1.8.1/lib/cool.io/async_watcher.rb0000644000004100000410000000211214632135713020403 0ustar  www-datawww-data#--
# Copyright (C)2007-10 Tony Arcieri
# You can redistribute this under the terms of the Ruby license
# See file LICENSE for details
#++

module Coolio
  # The AsyncWatcher lets you signal another thread to wake up.  Its
  # intended use is notifying another thread of events.
  class AsyncWatcher < IOWatcher
    def initialize
      @reader, @writer = ::IO.pipe
      super(@reader)
    end

    # Signal the async watcher.  This call is thread safe.
    def signal
      # Write a byte to the pipe.  What we write is meaningless, it
      # merely signals an event has occurred for each byte written.
      @writer.write "\0"
    end

    # Called whenever a signal is received
    def on_signal; end
    event_callback :on_signal

    #########
    protected
    #########

    def on_readable
      # Read a byte from the pipe.  This clears readability, unless
      # another signal is pending
      begin
        @reader.read_nonblock 1
      rescue Errno::EAGAIN
        # in case there are spurious wakeups from forked processes
        return
      end
      on_signal
    end
  end
end
cool.io-1.8.1/lib/cool.io/io.rb0000644000004100000410000001110214632135713016161 0ustar  www-datawww-data#--
# Copyright (C)2007-10 Tony Arcieri
# You can redistribute this under the terms of the Ruby license
# See file LICENSE for details
#++

module Coolio
  # A buffered I/O class which fits into the Coolio Watcher framework.
  # It provides both an observer which reads data as it's received
  # from the wire and a buffered write watcher which stores data and writes
  # it out each time the socket becomes writable.
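  #
  # A minimal subclass sketch (the Echo class, host, and port here are
  # illustrative, not part of the library):
  #
  #   class Echo < Coolio::IO
  #     def on_read(data)
  #       write data # buffered; flushed when the socket becomes writable
  #     end
  #   end
  #
  #   Echo.new(TCPSocket.new("localhost", 4321)).attach(Coolio::Loop.default)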
# # This class is primarily meant as a base class for other streams # which need non-blocking writing, and is used to implement Coolio's # Socket class and its associated subclasses. class IO extend Meta # Maximum number of bytes to consume at once INPUT_SIZE = 16384 def initialize(io) @_io = io @_write_buffer ||= ::IO::Buffer.new @_read_watcher = Watcher.new(io, self, :r) @_write_watcher = Watcher.new(io, self, :w) end # # Watcher methods, delegated to @_read_watcher # # Attach to the event loop def attach(loop) @_read_watcher.attach(loop) schedule_write if !@_write_buffer.empty? self end # Detach from the event loop def detach # TODO should these detect write buffers, as well? @_read_watcher.detach self end # Enable the watcher def enable @_read_watcher.enable self end # Disable the watcher def disable @_read_watcher.disable self end # Is the watcher attached? def attached? @_read_watcher.attached? end # Is the watcher enabled? def enabled? @_read_watcher.enabled? end # Obtain the event loop associated with this object def evloop @_read_watcher.evloop end # # Callbacks for asynchronous events # # Called whenever the IO object receives data def on_read(data); end event_callback :on_read # Called whenever a write completes and the output buffer is empty def on_write_complete; end event_callback :on_write_complete # Called whenever the IO object hits EOF def on_close; end event_callback :on_close # # Write interface # # Write data in a buffered, non-blocking manner def write(data) @_write_buffer << data schedule_write data.size end # Close the IO stream def close detach if attached? detach_write_watcher @_io.close unless closed? on_close nil end # Is the IO object closed? def closed? @_io.nil? or @_io.closed? end ######### protected ######### # Read from the input buffer and dispatch to on_read def on_readable begin on_read @_io.read_nonblock(INPUT_SIZE) rescue Errno::EAGAIN, Errno::EINTR return # SystemCallError catches Errno::ECONNRESET amongst others. rescue SystemCallError, EOFError, IOError, SocketError close end end # Write the contents of the output buffer def on_writable begin @_write_buffer.write_to(@_io) rescue Errno::EINTR return # SystemCallError catches Errno::EPIPE & Errno::ECONNRESET amongst others. rescue SystemCallError, IOError, SocketError return close end if @_write_buffer.empty? disable_write_watcher on_write_complete end end # Schedule a write to be performed when the IO object becomes writable def schedule_write return unless @_io # this would mean 'we are still pre DNS here' return unless @_read_watcher.attached? # this would mean 'currently unattached' -- ie still pre DNS, or just plain not attached, which is ok begin enable_write_watcher rescue IOError end end def enable_write_watcher if @_write_watcher.attached? @_write_watcher.enable unless @_write_watcher.enabled? else @_write_watcher.attach(evloop) end end def disable_write_watcher @_write_watcher.disable if @_write_watcher and @_write_watcher.enabled? end def detach_write_watcher @_write_watcher.detach if @_write_watcher and @_write_watcher.attached? 
end # Internal class implementing watchers used by Coolio::IO class Watcher < IOWatcher def initialize(ruby_io, coolio_io, flags) @coolio_io = coolio_io super(ruby_io, flags) end # Configure IOWatcher event callbacks to call the method passed to #initialize def on_readable @coolio_io.__send__(:on_readable) end def on_writable @coolio_io.__send__(:on_writable) end end end end cool.io-1.8.1/lib/cool.io/iowatcher.rb0000644000004100000410000000074714632135713017552 0ustar www-datawww-data#-- # Copyright (C)2007-10 Tony Arcieri # You can redistribute this under the terms of the Ruby license # See file LICENSE for details #++ module Coolio class IOWatcher # The actual implementation of this class resides in the C extension # Here we metaprogram proper event_callbacks for the callback methods # These can take a block and store it to be called when the event # is actually fired. extend Meta event_callback :on_readable, :on_writable end end cool.io-1.8.1/lib/cool.io/meta.rb0000644000004100000410000000303514632135713016504 0ustar www-datawww-data#-- # Copyright (C)2007-10 Tony Arcieri # You can redistribute this under the terms of the Ruby license # See file LICENSE for details #++ module Coolio module Meta # Use an alternate watcher with the attach/detach/enable/disable methods # if it is presently assigned. This is useful if you are waiting for # an event to occur before the current watcher can be used in earnest, # such as making an outgoing TCP connection. def watcher_delegate(proxy_var) %w{attach attached? detach enable disable}.each do |method| module_eval <<-EOD def #{method}(*args) if defined? #{proxy_var} and #{proxy_var} #{proxy_var}.#{method}(*args) return self end super end EOD end end # Define callbacks whose behavior can be changed on-the-fly per instance. # This is done by giving a block to the callback method, which is captured # as a proc and stored for later. If the method is called without a block, # the stored block is executed if present, otherwise it's a noop. def event_callback(*methods) methods.each do |method| module_eval <<-EOD remove_method "#{method}" def #{method}(*args, &block) if block @#{method}_callback = block return end if defined? 
@#{method}_callback and @#{method}_callback
            @#{method}_callback.call(*args)
          end
        end
      EOD
    end
  end
end
end
cool.io-1.8.1/lib/cool.io/version.rb0000644000004100000410000000011614632135713017240 0ustar  www-datawww-datamodule Coolio
  VERSION = "1.8.1"

  def self.version
    VERSION
  end
end
cool.io-1.8.1/lib/cool.io/loop.rb0000644000004100000410000000651414632135713016534 0ustar  www-datawww-data#--
# Copyright (C)2007-10 Tony Arcieri
# You can redistribute this under the terms of the Ruby license
# See file LICENSE for details
#++

require 'thread'

# Monkeypatch Thread to include a method for obtaining the default Coolio::Loop
class Thread
  def _coolio_loop
    @_coolio_loop ||= Coolio::Loop.new
  end
end

module Coolio
  class Loop
    # Retrieve the default event loop for the current thread
    def self.default
      Thread.current._coolio_loop
    end

    # Create a new Coolio::Loop
    #
    # Options:
    #
    # :skip_environment (boolean)
    #   Ignore the $LIBEV_FLAGS environment variable
    #
    # :fork_check (boolean)
    #   Enable autodetection of forks
    #
    # :backend
    #   Choose the default backend, one (or many in an array) of:
    #     :select (most platforms)
    #     :poll   (most platforms except Windows)
    #     :epoll  (Linux)
    #     :kqueue (BSD/Mac OS X)
    #     :port   (Solaris 10)
    #
    def initialize(options = {})
      @watchers = {}
      @active_watchers = 0

      flags = 0

      options.each do |option, value|
        case option
        when :skip_environment
          flags |= EVFLAG_NOENV if value
        when :fork_check
          flags |= EVFLAG_FORKCHECK if value
        when :backend
          value = [value] unless value.is_a? Array
          value.each do |backend|
            case backend
            when :select then flags |= EVBACKEND_SELECT
            when :poll   then flags |= EVBACKEND_POLL
            when :epoll  then flags |= EVBACKEND_EPOLL
            when :kqueue then flags |= EVBACKEND_KQUEUE
            when :port   then flags |= EVBACKEND_PORT
            else raise ArgumentError, "no such backend: #{backend}"
            end
          end
        else raise ArgumentError, "no such option: #{option}"
        end
      end

      @loop = ev_loop_new(flags)
    end

    # Attach a watcher to the loop
    def attach(watcher)
      watcher.attach self
    end

    # Run the event loop and dispatch events back to Ruby.  If there
    # are no watchers associated with the event loop it will return
    # immediately.  Otherwise, run will continue blocking and making
    # event callbacks to watchers until all watchers associated with
    # the loop have been disabled or detached.  The loop may be
    # explicitly stopped by calling the stop method on the loop object.
    def run(timeout = nil)
      raise RuntimeError, "no watchers for this loop" if @watchers.empty?
      @running = true
      while @running and not @active_watchers.zero?
        run_once(timeout)
      end
      @running = false
    end

    # Stop the event loop if it's running
    def stop
      raise RuntimeError, "loop not running" unless @running
      @running = false
    end

    # Does the loop have any active watchers?
    def has_active_watchers?
@active_watchers > 0 end # All watchers attached to the current loop def watchers @watchers.keys end ####### private ####### EVFLAG_NOENV = 0x1000000 # do NOT consult environment EVFLAG_FORKCHECK = 0x2000000 # check for a fork in each iteration EVBACKEND_SELECT = 0x00000001 # supported about anywhere EVBACKEND_POLL = 0x00000002 # !win EVBACKEND_EPOLL = 0x00000004 # linux EVBACKEND_KQUEUE = 0x00000008 # bsd EVBACKEND_PORT = 0x00000020 # solaris 10 end end cool.io-1.8.1/lib/cool.io/timer_watcher.rb0000644000004100000410000000072614632135713020417 0ustar www-datawww-data#-- # Copyright (C)2007 Tony Arcieri # You can redistribute this under the terms of the Ruby license # See file LICENSE for details #++ module Coolio class TimerWatcher # The actual implementation of this class resides in the C extension # Here we metaprogram proper event_callbacks for the callback methods # These can take a block and store it to be called when the event # is actually fired. extend Meta event_callback :on_timer end end cool.io-1.8.1/lib/cool.io/socket.rb0000644000004100000410000001426414632135713017054 0ustar www-datawww-data#-- # Copyright (C)2007 Tony Arcieri # You can redistribute this under the terms of the Ruby license # See file LICENSE for details #++ require 'socket' require 'resolv' module Coolio class Socket < IO def self.connect(socket, *args) new(socket, *args).instance_eval do @_connector = Connector.new(self, socket) self end end # Just initializes some instance variables to avoid # warnings and calls super(). def initialize *args @_failed = nil @_connector = nil super end watcher_delegate :@_connector remove_method :attach def attach(evloop) raise RuntimeError, "connection failed" if @_failed if @_connector @_connector.attach(evloop) return self end super end # Called upon completion of a socket connection def on_connect; end event_callback :on_connect # Called if a socket connection failed to complete def on_connect_failed; end event_callback :on_connect_failed # Called if a hostname failed to resolve when connecting # Defaults to calling on_connect_failed alias_method :on_resolve_failed, :on_connect_failed ######### protected ######### class Connector < IOWatcher def initialize(coolio_socket, ruby_socket) @coolio_socket, @ruby_socket = coolio_socket, ruby_socket super(ruby_socket, :w) end def on_writable evl = evloop detach if connect_successful? @coolio_socket.instance_eval { @_connector = nil } @coolio_socket.attach(evl) @ruby_socket.setsockopt(::Socket::IPPROTO_TCP, ::Socket::TCP_NODELAY, [1].pack("l")) @ruby_socket.setsockopt(::Socket::SOL_SOCKET, ::Socket::SO_KEEPALIVE, true) @coolio_socket.__send__(:on_connect) else @coolio_socket.instance_eval { @_failed = true } @coolio_socket.__send__(:on_connect_failed) end end ####### private ####### def connect_successful? @ruby_socket.getsockopt(::Socket::SOL_SOCKET, ::Socket::SO_ERROR).unpack('i').first == 0 rescue IOError false end end end class TCPSocket < Socket attr_reader :remote_host, :remote_addr, :remote_port, :address_family watcher_delegate :@_resolver # Similar to .new, but used in cases where the resulting object is in a # "half-open" state. This is primarily used for when asynchronous # DNS resolution is taking place. We don't actually have a handle to # the socket we want to use to create the watcher yet, since we don't # know the IP address to connect to. 
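    #
    # A sketch of the flow this enables (the hostname is illustrative):
    #
    #   sock = Coolio::TCPSocket.connect("example.com", 80) # half-open while DNS resolves
    #   sock.attach(Coolio::Loop.default)                   # attaches the resolver, then the connector
    #   sock.write "GET / HTTP/1.0\r\n\r\n"                 # buffered until the connection completes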
def self.precreate(*args, &block)
      obj = allocate
      obj.__send__(:preinitialize, *args, &block)
      obj
    end

    # Perform a non-blocking connect to the given host and port
    # see examples/echo_client.rb
    # addr is a string; it can be an IP address or a hostname.
    def self.connect(addr, port, *args)
      family = nil

      if (Resolv::IPv4.create(addr) rescue nil)
        family = ::Socket::AF_INET
      elsif (Resolv::IPv6.create(addr) rescue nil)
        family = ::Socket::AF_INET6
      end

      if family
        return super(TCPConnectSocket.new(family, addr, port), *args) # this creates a 'real' write buffer so we're ok there with regard to already having a write buffer from the get go
      end

      if host = Coolio::DNSResolver.hosts(addr)
        return connect(host, port, *args) # calls this same function
      end

      precreate(addr, port, *args)
    end

    # Called by precreate during asynchronous DNS resolution
    def preinitialize(addr, port, *args)
      @_write_buffer = ::IO::Buffer.new # allow for writing BEFORE DNS has resolved
      @remote_host, @remote_addr, @remote_port = addr, addr, port
      @_resolver = TCPConnectResolver.new(self, addr, port, *args)
    end

    private :preinitialize

    PEERADDR_FAILED = ["?", 0, "name resolution failed", "?"]

    def initialize(socket)
      unless socket.is_a?(::TCPSocket) or socket.is_a?(TCPConnectSocket)
        raise TypeError, "socket must be a TCPSocket"
      end

      super

      @address_family, @remote_port, @remote_host, @remote_addr = (socket.peeraddr rescue PEERADDR_FAILED)
    end

    def peeraddr
      [@address_family, @remote_port, @remote_host, @remote_addr]
    end

    #########
    protected
    #########

    class TCPConnectSocket < ::Socket
      def initialize(family, addr, port, host = addr)
        @host, @addr, @port = host, addr, port
        @address_family = nil

        super(family, ::Socket::SOCK_STREAM, 0)
        begin
          connect_nonblock(::Socket.sockaddr_in(port, addr))
        rescue Errno::EINPROGRESS
        end
      end

      def peeraddr
        [
          @address_family == ::Socket::AF_INET ? 'AF_INET' : 'AF_INET6',
          @port,
          @host,
          @addr
        ]
      end
    end

    class TCPConnectResolver < Coolio::DNSResolver
      def initialize(socket, host, port, *args)
        @sock, @host, @port, @args = socket, host, port, args
        super(host)
      end

      def on_success(addr)
        host, port, args = @host, @port, @args

        @sock.instance_eval do
          # DNSResolver only supports IPv4 so we can safely assume an IPv4 address
          begin
            socket = TCPConnectSocket.new(::Socket::AF_INET, addr, port, host)
          rescue Errno::ENETUNREACH
            on_connect_failed
            return
          end

          initialize(socket, *args)
          @_connector = Socket::Connector.new(self, socket)
          @_resolver = nil
        end
        @sock.attach(evloop)
      end

      def on_failure
        @sock.__send__(:on_resolve_failed)
        @sock.instance_eval do
          @_resolver = nil
          @_failed = true
        end
        return
      end
    end
  end

  class UNIXSocket < Socket
    attr_reader :path, :address_family

    # Connect to the given UNIX domain socket
    def self.connect(path, *args)
      new(::UNIXSocket.new(path), *args)
    end

    def initialize(socket)
      raise ArgumentError, "socket must be a UNIXSocket" unless socket.is_a?
::UNIXSocket
      super

      @address_family, @path = socket.peeraddr
    end
  end
end
cool.io-1.8.1/spec/0000755000004100000410000000000014632135713014052 5ustar  www-datawww-datacool.io-1.8.1/spec/unix_listener_spec.rb0000644000004100000410000000135414632135713020304 0ustar  www-datawww-datarequire File.expand_path('../spec_helper', __FILE__)
require 'tempfile'

describe Cool.io::UNIXListener, :env => :exclude_win do
  before :each do
    @tmp = Tempfile.new('coolio_unix_listener_spec')
    expect(File.unlink(@tmp.path)).to eq(1)
    expect(File.exist?(@tmp.path)).to eq(false)
  end

  it "creates a new UNIXListener" do
    _listener = Cool.io::UNIXListener.new(@tmp.path)
    expect(File.socket?(@tmp.path)).to eq(true)
  end

  it "builds off an existing UNIXServer" do
    unix_server = UNIXServer.new(@tmp.path)
    expect(File.socket?(@tmp.path)).to eq(true)
    listener = Cool.io::UNIXListener.new(unix_server)
    expect(File.socket?(@tmp.path)).to eq(true)
    expect(listener.fileno).to eq(unix_server.fileno)
  end
end
cool.io-1.8.1/spec/spec_helper.rb0000644000004100000410000000057514632135713016677 0ustar  www-datawww-data$LOAD_PATH.unshift File.dirname(__FILE__)
$LOAD_PATH.unshift File.expand_path('../../lib', __FILE__)

require 'rspec'
require 'cool.io'

def unused_port
  s = TCPServer.open(0)
  port = s.addr[1]
  s.close
  port
end

RSpec.configure do |c|
  if RUBY_PLATFORM =~ /mingw|mswin/
    $stderr.puts "Skip some specs on Windows"
    c.filter_run_excluding :env => :exclude_win
  end
end
cool.io-1.8.1/spec/async_watcher_spec.rb0000644000004100000410000000307014632135713020243 0ustar  www-datawww-datarequire File.expand_path('../spec_helper', __FILE__)
require 'tempfile'
require 'fcntl'

describe Cool.io::AsyncWatcher, :env => :exclude_win do

  it "does not signal on spurious wakeups" do
    aw = Cool.io::AsyncWatcher.new
    tmp = Tempfile.new('coolio_async_watcher_test')
    nr_fork = 2 # must be at least two for spurious wakeups

    # We have a better chance of failing if this overflows the pipe buffer
    # which POSIX requires >= 512 bytes, Linux 2.6 uses 4096 bytes
    nr_signal = 4096 * 4

    append = File.open(tmp.path, "ab")
    append.sync = true
    rd, wr = ::IO.pipe

    aw.on_signal { append.syswrite("#$$\n") }
    children = nr_fork.times.map do
      fork do
        trap(:TERM) { exit!(0) }
        rloop = Cool.io::Loop.default
        aw.attach(rloop)
        wr.write '.' # signal to master that we're ready
        rloop.run
        exit!(1) # should not get here
      end
    end

    # ensure children are ready
    nr_fork.times { expect(rd.sysread(1)).to eq('.') }

    # send our signals
    nr_signal.times { aw.signal }

    # wait for the pipe buffer to be consumed by the children
    sleep 1 while tmp.stat.ctime >= (Time.now - 4)

    children.each do |pid|
      Process.kill(:TERM, pid)
      _, status = Process.waitpid2(pid)
      expect(status.exitstatus).to eq(0)
    end

    # we should've written a line for every signal we sent
    lines = tmp.readlines
    expect(lines.size).to eq(nr_signal)

    # theoretically a bad kernel scheduler could give us fewer...
    expect(lines.sort.uniq.size).to eq(nr_fork)

    tmp.close!
end end cool.io-1.8.1/spec/dns_spec.rb0000644000004100000410000000264314632135713016202 0ustar www-datawww-datarequire File.expand_path('../spec_helper', __FILE__) VALID_DOMAIN = "google.com" INVALID_DOMAIN = "gibidigibigididibitidibigitibidigitidididi.com" class ItWorked < StandardError; end class WontResolve < StandardError; end class ConnectorThingy < Cool.io::TCPSocket def on_connect raise ItWorked end def on_resolve_failed raise WontResolve end end describe "DNS" do before :each do @loop = Cool.io::Loop.new @preferred_localhost_address = ::Socket.getaddrinfo("localhost", nil).first[3] end it "connects to valid domains" do begin c = ConnectorThingy.connect(VALID_DOMAIN, 80).attach(@loop) expect do @loop.run end.to raise_error(ItWorked) ensure c.close end end it "fires on_resolve_failed for invalid domains" do ConnectorThingy.connect(INVALID_DOMAIN, 80).attach(@loop) expect do @loop.run end.to raise_error(WontResolve) end it "resolve localhost even though hosts is empty" do Tempfile.open("empty") do |file| expect( Coolio::DNSResolver.hosts("localhost", file.path)).to eq @preferred_localhost_address end end it "resolve missing localhost even though hosts entries exist" do Tempfile.open("empty") do |file| file.puts("127.0.0.1 example.internal") file.flush expect( Coolio::DNSResolver.hosts("localhost", file.path)).to eq @preferred_localhost_address end end end cool.io-1.8.1/spec/stat_watcher_spec.rb0000644000004100000410000000301714632135713020102 0ustar www-datawww-datarequire File.expand_path('../spec_helper', __FILE__) TEMP_FILE_PATH = "./test.txt" INTERVAL = 0.010 class MyStatWatcher < Cool.io::StatWatcher attr_accessor :accessed, :previous, :current def initialize(path) super path, INTERVAL end def on_change(previous, current) self.accessed = true self.previous = previous self.current = current end end def run_with_file_change(path) reactor = Cool.io::Loop.new sw = MyStatWatcher.new(path) sw.attach(reactor) tw = Cool.io::TimerWatcher.new(INTERVAL, true) tw.on_timer do reactor.stop if sw.accessed write_file(path) end tw.attach(reactor) reactor.run tw.detach sw.detach sw end def write_file(path) File.open(path, "w+") { |f| f.write(rand.to_s) } end def delete_file(path) File.delete(TEMP_FILE_PATH) end describe Cool.io::StatWatcher do let :watcher do run_with_file_change(TEMP_FILE_PATH) end before :each do write_file(TEMP_FILE_PATH) end after :each do delete_file(TEMP_FILE_PATH) end it "fire on_change when the file it is watching is modified" do expect(watcher.accessed).to eq(true) end it "should pass previous and current file stat info given a stat watcher" do expect(watcher.previous.ino).to eq(watcher.current.ino) end it "should raise when the handler does not take 2 parameters" do class MyStatWatcher < Cool.io::StatWatcher remove_method :on_change def on_change end end expect { watcher.accessed }.to raise_error(ArgumentError) end end cool.io-1.8.1/spec/unix_server_spec.rb0000644000004100000410000000146414632135713017767 0ustar www-datawww-datarequire File.expand_path('../spec_helper', __FILE__) require 'tempfile' describe Cool.io::UNIXServer, :env => :exclude_win do before :each do @tmp = Tempfile.new('coolio_unix_server_spec') expect(File.unlink(@tmp.path)).to eq(1) expect(File.exist?(@tmp.path)).to eq(false) end it "creates a new Cool.io::UNIXServer" do listener = Cool.io::UNIXListener.new(@tmp.path) listener.listen(24) expect(File.socket?(@tmp.path)).to eq(true) end it "builds off an existing ::UNIXServer" do unix_server = ::UNIXServer.new(@tmp.path) 
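    # wrapping an existing ::UNIXServer should reuse its socket rather than binding a new one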
expect(File.socket?(@tmp.path)).to eq(true) listener = Cool.io::UNIXServer.new(unix_server, Coolio::UNIXSocket) listener.listen(24) expect(File.socket?(@tmp.path)).to eq(true) expect(listener.fileno).to eq(unix_server.fileno) end end cool.io-1.8.1/spec/tcp_socket_spec.rb0000644000004100000410000000751014632135713017552 0ustar www-datawww-datarequire File.expand_path('../spec_helper', __FILE__) describe Coolio::TCPSocket do let :loop do Coolio::Loop.new end before :each do @echo = TCPServer.new("127.0.0.1", 0) @host = @echo.addr[3] @port = @echo.addr[1] @running = true @echo_thread = Thread.new do socks = [@echo] begin serv socks ensure socks.each do |s| s.close end end Thread.pass end end def serv(socks) while @running selected = select(socks, [], [], 0.1) next if selected.nil? selected[0].each do |s| if s == @echo socks.push s.accept next end begin unless s.eof? s.write(s.read_nonblock 1) end rescue SystemCallError, EOFError, IOError, SocketError end end end end def shutdown if @running @running = false @echo_thread.join end end after :each do shutdown end context "#close" do it "detaches all watchers on #close before loop#run" do client = Coolio::TCPSocket.connect(@host, @port) loop.attach client client.close expect(loop.watchers.size).to eq 0 end end context "#on_connect" do class OnConnect < Cool.io::TCPSocket attr :connected def on_connect @connected = true end end it "connected client called on_connect" do begin c = OnConnect.connect(@host, @port) loop.attach c loop.run_once expect(c.connected).to eq true ensure c.close end end end context "#on_connect_failed" do class OnConnectFailed < Cool.io::TCPSocket attr :connect_failed def on_connect_failed @connect_failed = true end end it "try to connect dead host" do serv = TCPServer.new(0) dead_host = serv.addr[3] dead_port = serv.addr[1] serv.close c = OnConnectFailed.connect(dead_host, dead_port) loop.attach c loop.run_once # on_connect_failed expect(c.connect_failed).to eq true end end context "#on_close" do class Closed < StandardError; end class OnClose < Cool.io::TCPSocket def on_close raise Closed end end let :client do OnClose.connect(@host, @port) end before :each do loop.attach client loop.run_once # on_connect client.write "0" end it "disconnect from client" do expect { client.close }.to raise_error(Closed) end it "disconnect from server" do shutdown expect { loop.run }.to raise_error(Closed) end end context "#on_read" do class Finished < StandardError; end class OnRead < Cool.io::TCPSocket attr :read_data, :times def on_connect @read_data = "" @times = 0 end def on_read(data) @read_data += data @times += 1 if @times < 5 write "#{@times}" else close raise Finished end end end it "receive 5 times" do c = OnRead.connect(@host, @port) loop.attach c loop.run_once # on_connect c.write "0" expect { loop.run }.to raise_error(Finished) expect(c.times).to eq 5 expect(c.read_data).to eq "01234" end end context "#on_write_complete" do class WriteComplete < StandardError; end class OnWriteComplete < Cool.io::TCPSocket attr :called def on_write_complete @called = true close raise WriteComplete end end it "on_write_complete is called" do c = OnWriteComplete.connect(@host, @port) loop.attach c loop.run_once # on_connect c.write "aaa" expect { loop.run }.to raise_error(WriteComplete) end end end cool.io-1.8.1/spec/udp_socket_spec.rb0000644000004100000410000000217114632135713017552 0ustar www-datawww-datarequire File.expand_path('../spec_helper', __FILE__) describe "Coolio::UDPSocket" do let :loop do Coolio::Loop.new end before :each do @echo = 
UDPSocket.open @echo.bind nil, 0 @port = @echo.addr[1] @running = true @echo_thread = Thread.new do while @running begin msg, sender = @echo.recvfrom_nonblock(3) @echo.send(msg + "bbb", 0, sender[3], sender[1]) rescue IO::WaitReadable end Thread.pass end end end after :each do @running = false @echo_thread.join @echo.close end class Readable < Cool.io::IOWatcher attr :socket, :received def initialize @socket = UDPSocket.new super(@socket) end def on_readable @received = @socket.recvfrom_nonblock(6).first end end it "receive message #on_readable 5 times" do 5.times do begin r = Readable.new r.socket.send "aaa", 0, "localhost", @port loop.attach r loop.run_once expect(r.received).to eq "aaabbb" ensure r.detach end end end end cool.io-1.8.1/spec/timer_watcher_spec.rb0000644000004100000410000000271114632135713020247 0ustar www-datawww-datarequire File.expand_path('../spec_helper', __FILE__) describe Cool.io::TimerWatcher do interval = 0.010 let :loop do Cool.io::Loop.new end it "can have the on_timer callback defined after creation" do @watcher = Cool.io::TimerWatcher.new(interval, true) nr = '0' expect(@watcher.on_timer { nr.succ! }).to be_nil expect(@watcher.attach(loop)).to eq(@watcher) expect(nr).to eq('0') sleep interval loop.run_once expect(nr).to eq('1') end it "can be subclassed" do class MyTimerWatcher < Cool.io::TimerWatcher TMP = '0' def on_timer TMP.succ! end end @watcher = MyTimerWatcher.new(interval, true) expect(@watcher.attach(loop)).to eq(@watcher) expect(MyTimerWatcher::TMP).to eq('0') sleep interval loop.run_once expect(MyTimerWatcher::TMP).to eq('1') end it "can have the on_timer callback redefined between runs" do @watcher = Cool.io::TimerWatcher.new(interval, true) nr = '0' expect(@watcher.on_timer { nr.succ! }).to be_nil expect(@watcher.attach(loop)).to eq(@watcher) expect(nr).to eq('0') sleep interval loop.run_once expect(nr).to eq('1') @watcher.detach expect(@watcher.on_timer { nr = :foo }).to be_nil expect(@watcher.attach(loop)).to eq(@watcher) expect(nr).to eq('1') sleep interval loop.run_once expect(nr).to eq(:foo) end after :each do @watcher.detach if defined?(@watcher) end end cool.io-1.8.1/spec/tcp_server_spec.rb0000644000004100000410000001136514632135713017573 0ustar www-datawww-datarequire File.expand_path('../spec_helper', __FILE__) TIMEOUT = 0.010 HOST = '127.0.0.1' PORT = unused_port def send_data(data) io = TCPSocket.new('127.0.0.1', PORT) begin io.write data ensure io.close end end class MyConnection < Coolio::Socket attr_accessor :data, :connected, :closed def initialize(io, on_message) super(io) @on_message = on_message end def on_connect @connected = true end def on_close @closed = true end def on_read(data) @on_message.call(data) end end @data = "" def on_message(data) @data = data end def test_run(data = nil) reactor = Coolio::Loop.new server = Cool.io::TCPServer.new(HOST, PORT, MyConnection, method(:on_message)) reactor.attach(server) thread = Thread.new { reactor.run } send_data(data) if data sleep TIMEOUT reactor.stop server.detach send_data('') # to leave from blocking loop thread.join @data ensure server.close end def test_run_once(data = nil) reactor = Coolio::Loop.new server = Cool.io::TCPServer.new(HOST, PORT, MyConnection, method(:on_message)) reactor.attach(server) thread = Thread.new do reactor.run_once # on_connect reactor.run_once # on_read end send_data(data) if data thread.join server.detach @data ensure server.close end def test_run_once_timeout(timeout = TIMEOUT) @data = "" reactor = Coolio::Loop.new server = 
Cool.io::TCPServer.new(HOST, PORT, MyConnection, method(:on_message)) reactor.attach(server) thread = Thread.new { reactor.run_once(timeout) } sleep timeout server.detach thread.join @data ensure server.close end def test_run_timeout(data = nil, timeout = TIMEOUT) reactor = Coolio::Loop.new server = Cool.io::TCPServer.new(HOST, PORT, MyConnection, method(:on_message)) reactor.attach(server) running = true thread = Thread.new do while running and reactor.has_active_watchers? reactor.run_once(timeout) end end send_data(data) if data sleep timeout server.detach running = false # another send is not required thread.join @data ensure server.close end # This test should work on Windows describe Coolio::TCPServer do it '#run' do expect(test_run("hello")).to eq("hello") end it '#run_once' do expect(test_run_once("hello")).to eq("hello") end it '#run_once(timeout)' do test_run_once_timeout # should not block end it '#run_once(-timeout)' do expect { test_run_once_timeout(-0.1) }.to raise_error(ArgumentError) end it '#run(timeout)' do expect(test_run_timeout("hello")).to eq("hello") end describe "functionaltest" do let :loop do Coolio::Loop.new end let :port do unused_port end context "#on_connect" do class ServerOnConnect < Coolio::Socket def initialize(io, cb) super(io) @cb = cb end def on_connect @cb.call end end it "connected socket called on_connect" do begin connected = false server = Cool.io::TCPServer.new("localhost", port, ServerOnConnect, proc { connected = true }) loop.attach server s = TCPSocket.open("localhost", port) loop.run_once s.close expect(connected).to eq true ensure server.detach end end end context "#on_close" do class ServerOnClose < Coolio::Socket def initialize(io, cb) super(io) @cb = cb end def on_close @cb.call end end it "closed socket called on_close" do begin closed = false server = Cool.io::TCPServer.new("localhost", port, ServerOnClose, proc { closed = true }) loop.attach server s = TCPSocket.open("localhost", port) loop.run_once s.close loop.run_once expect(closed).to eq true ensure server.detach end end end context "#on_read" do class Echo < Coolio::Socket def initialize(io, cb) super(io) @cb = cb end def on_read(data) @cb.call data _size = write(data + "fff") end end it "server socket received data" do begin data = "aaa" server = Cool.io::TCPServer.new("localhost", port, Echo, proc { |d| data = d }) loop.attach server thread = Thread.new { loop.run } s = TCPSocket.open("localhost", port) s.write "zzz" sleep 0.1 expect(data).to eq "zzz" expect(s.read 6).to eq "zzzfff" ensure s.close loop.stop server.detach thread.join end end end end end cool.io-1.8.1/spec/iobuffer_spec.rb0000644000004100000410000000766514632135713017224 0ustar www-datawww-datarequire File.expand_path('../spec_helper', __FILE__) describe IO::Buffer do let :buffer do IO::Buffer.new end it "provides a subset of the methods available in Strings" do expect(buffer << "foo").to eq "foo" expect(buffer << "bar").to eq "bar" expect(buffer.to_str).to eq "foobar" expect(buffer.to_str).to eq "foobar" expect(buffer.size).to eq 6 end it "provides append and prepend" do expect(buffer.append "bar").to eq "bar" expect(buffer.prepend "foo").to eq "foo" expect(buffer.append "baz").to eq "baz" expect(buffer.to_str).to eq "foobarbaz" end context "#read" do it "can be used to retrieve the contents of a buffer" do expect(buffer << "foo").to eq "foo" expect(buffer.read 2).to eq "fo" expect(buffer << "bar").to eq "bar" expect(buffer.read 2).to eq "ob" expect(buffer << "baz").to eq "baz" expect(buffer.read 3).to eq "arb" 
end end describe "provides methods for performing non-blocking I/O" do require "tempfile" context "#read_from" do context "using local file", :env => :exclude_win do let :tmp do t = Tempfile.open "read_from" t << "foobar" t.rewind t end it "will read as much data as possible" do expect(buffer.read_from tmp).to eq 6 expect(buffer.to_str).to eq "foobar" end end context "using udp socket" do before :each do @receiver = UDPSocket.open @receiver.bind nil, 0 @sender = UDPSocket.open @sender.connect "localhost", @receiver.addr[1] end after :each do @receiver.close @sender.close end it "will read as much data as possible" do select [], [@sender] @sender.send "foo", 0 select [@receiver] expect(buffer.read_from @receiver).to eq 3 expect(buffer.to_str).to eq "foo" select [], [@sender] @sender.send "barbaz", 0 select [@receiver] expect(buffer.read_from @receiver).to eq 6 expect(buffer.to_str).to eq "foobarbaz" end end end context "#write_to" do context "using local file", :env => :exclude_win do let :tmp do Tempfile.open "write_to" end it "writes the contents of the buffer" do buffer << "foo" expect(buffer.write_to tmp).to eq 3 tmp.rewind expect(tmp.read 3).to eq "foo" end end context "using udp socket" do before :each do @receiver = UDPSocket.open @receiver.bind nil, 0 @sender = UDPSocket.open @sender.connect "localhost", @receiver.addr[1] end after :each do @receiver.close @sender.close end it "will read as much data as possible" do buffer << "foo" select [], [@sender] expect(buffer.write_to @sender).to eq 3 select [@receiver] expect(@receiver.recvfrom_nonblock(3)[0]).to eq "foo" end end end end context "#clear" do it "clear all data" do buffer << "foo" expect(buffer.size).to eq 3 expect(buffer.empty?).to eq false buffer.clear expect(buffer.size).to eq 0 expect(buffer.empty?).to eq true end end context "#read_frame" do it "Read up to and including the given frame marker" do buffer << "foo\nbarbaz" data = "" expect(buffer.read_frame data, "\n".ord).to eq true expect(buffer.empty?).to eq false expect(data).to eq "foo\n" expect(buffer.to_str).to eq "barbaz" expect(buffer.read_frame data, "\n".ord).to eq false expect(buffer.empty?).to eq true expect(data).to eq "foo\nbarbaz" expect(buffer.to_str).to eq "" end end end cool.io-1.8.1/.rspec0000644000004100000410000000005514632135713014235 0ustar www-datawww-data--color --format documentation --backtrace cool.io-1.8.1/Rakefile0000644000004100000410000000421414632135713014566 0ustar www-datawww-datarequire 'bundler/gem_tasks' require 'rake/clean' require 'rspec/core/rake_task' RSpec::Core::RakeTask.new RSpec::Core::RakeTask.new(:rcov) do |task| task.rcov = true end task :default => %w(compile spec) require 'rdoc/task' Rake::RDocTask.new do |rdoc| version = File.exist?('VERSION') ? File.read('VERSION') : "" rdoc.rdoc_dir = 'rdoc' rdoc.title = "cool.io #{version}" rdoc.rdoc_files.include('README*') rdoc.rdoc_files.include('lib/**/*.rb') end require 'rake/extensiontask' spec = eval(File.read("cool.io.gemspec")) def configure_cross_compilation(ext) unless RUBY_PLATFORM =~ /mswin|mingw/ ext.cross_compile = true ext.cross_platform = ['x86-mingw32', 'x64-mingw32'] end end Rake::ExtensionTask.new('iobuffer_ext', spec) do |ext| ext.ext_dir = 'ext/iobuffer' configure_cross_compilation(ext) end Rake::ExtensionTask.new('cool.io_ext', spec) do |ext| ext.ext_dir = 'ext/cool.io' configure_cross_compilation(ext) end # Note that this rake-compiler-dock rake task dose not support bundle install(1) --path option. 
# Please use bundle install instead when you execute this rake task. namespace :build do desc 'Build gems for Windows per rake-compiler-dock' task :windows do require 'rake_compiler_dock' RakeCompilerDock.sh <<-CROSS bundle && bundle exec rake cross native gem RUBY_CC_VERSION='3.0.0:2.7.0:2.6.0:2.5.0:2.4.0' CROSS end end # adapted from http://flavoriffic.blogspot.com/2009/06/easily-valgrind-gdb-your-ruby-c.html def specs_command require "find" files = [] Find.find("spec") do |f| files << f if File.basename(f) =~ /.*spec.*\.rb$/ end cmdline = "#{RUBY} -I.:lib:ext:spec \ -e '%w[#{files.join(' ')}].each { |f| require f }'" end namespace :spec do desc "run specs with valgrind" task :valgrind => :compile do system "valgrind --num-callers=15 \ --partial-loads-ok=yes --undef-value-errors=no \ --tool=memcheck --leak-check=yes --track-fds=yes \ --show-reachable=yes #{specs_command}" end end CLEAN.include "**/*.rbc", "**/*.o", "**/*.so", "**/*.bundle" CLEAN.exclude "vendor/**/*.rbc", "vendor/**/*.o", "vendor/**/*.so", "vendor/**/*.bundle" cool.io-1.8.1/libev_ruby_gil.diff0000644000004100000410000001254414632135713016755 0ustar www-datawww-datadiff --git a/ext/libev/ev.c b/ext/libev/ev.c index 39b9faf..dae87f1 100644 --- a/ext/libev/ev.c +++ b/ext/libev/ev.c @@ -37,6 +37,10 @@ * either the BSD or the GPL. */ +/* ########## COOLIO PATCHERY HO! ########## */ +#include "ruby.h" +/* ######################################## */ + /* this big block deduces configuration from config.h */ #ifndef EV_STANDALONE # ifdef EV_CONFIG_H @@ -107,7 +111,7 @@ # undef EV_USE_POLL # define EV_USE_POLL 0 # endif - + # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H # ifndef EV_USE_EPOLL # define EV_USE_EPOLL EV_FEATURE_BACKENDS @@ -116,7 +120,7 @@ # undef EV_USE_EPOLL # define EV_USE_EPOLL 0 # endif - + # if HAVE_KQUEUE && HAVE_SYS_EVENT_H # ifndef EV_USE_KQUEUE # define EV_USE_KQUEUE EV_FEATURE_BACKENDS @@ -125,7 +129,7 @@ # undef EV_USE_KQUEUE # define EV_USE_KQUEUE 0 # endif - + # if HAVE_PORT_H && HAVE_PORT_CREATE # ifndef EV_USE_PORT # define EV_USE_PORT EV_FEATURE_BACKENDS @@ -161,7 +165,7 @@ # undef EV_USE_EVENTFD # define EV_USE_EVENTFD 0 # endif - + #endif #include @@ -2174,7 +2178,7 @@ downheap (ANHE *heap, int N, int k) heap [k] = heap [c]; ev_active (ANHE_w (heap [k])) = k; - + k = c; } @@ -2594,7 +2598,7 @@ ev_supported_backends (void) EV_THROW if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; if (EV_USE_POLL ) flags |= EVBACKEND_POLL; if (EV_USE_SELECT) flags |= EVBACKEND_SELECT; - + return flags; } @@ -3398,9 +3402,33 @@ time_update (EV_P_ ev_tstamp max_block) } } +/* ########## COOLIO PATCHERY HO! ########## */ +#if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL) +struct ev_poll_args { + struct ev_loop *loop; + ev_tstamp waittime; +}; + +static +VALUE ev_backend_poll(void *ptr) +{ + struct ev_poll_args *args = (struct ev_poll_args *)ptr; + struct ev_loop *loop = args->loop; + backend_poll (EV_A_ args->waittime); + return Qnil; +} +#endif +/* ######################################## */ + int ev_run (EV_P_ int flags) { +/* ########## COOLIO PATCHERY HO! 
########## */ +#if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL) + struct ev_poll_args poll_args; +#endif +/* ######################################## */ + #if EV_FEATURE_API ++loop_depth; #endif @@ -3518,7 +3546,70 @@ ev_run (EV_P_ int flags) ++loop_count; #endif assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */ - backend_poll (EV_A_ waittime); + +/* +########################## COOLIO PATCHERY HO! ########################## + +According to the grandwizards of Ruby, locking and unlocking of the global +interpreter lock are apparently too powerful a concept for a mere mortal to +wield (although redefining what + and - do to numbers is totally cool). +And so it came to pass that the only acceptable way to release the global +interpreter lock is through a convoluted callback system that takes a +function pointer. While the grandwizard of libev foresaw this sort of scenario, +he too attempted to place an API with callbacks on it, one that runs before +the system call, and one that runs immediately after. + +And so it came to pass that trying to wrap everything up in callbacks created +two incompatible APIs, Ruby's which releases the global interpreter lock and +reacquires it when the callback returns, and libev's, which wants two +callbacks, one which runs before the polling operation starts, and one which +runs after it finishes. + +These two systems are incompatible as they both want to use callbacks to +solve the same problem, however libev wants to use before/after callbacks, +and Ruby wants to use an "around" callback. This presents a significant +problem as these two patterns of callbacks are diametrical opposites of each +other and thus cannot be composed. + +And thus we are left with no choice but to patch the internals of libev in +order to release a mutex at just the precise moment. + +This is a great example of a situation where granular locking and unlocking +of the GVL is practically required. The goal is to get as close to the +system call as possible, and to keep the GVL unlocked for the shortest +amount of time possible. + +Perhaps Ruby could benefit from such an API, e.g.: + +rb_thread_unsafe_dangerous_crazy_blocking_region_begin(...); +rb_thread_unsafe_dangerous_crazy_blocking_region_end(...); + +####################################################################### +*/ + +/* + simulate rb_thread_call_without_gvl using rb_thread_blocking_region. 
+ https://github.com/brianmario/mysql2/blob/master/ext/mysql2/client.h#L8 +*/ + +#ifndef HAVE_RB_THREAD_CALL_WITHOUT_GVL +#ifdef HAVE_RB_THREAD_BLOCKING_REGION +#define rb_thread_call_without_gvl(func, data1, ubf, data2) \ + rb_thread_blocking_region((rb_blocking_function_t *)func, data1, ubf, data2) +#endif +#endif + +#if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL) + poll_args.loop = loop; + poll_args.waittime = waittime; + rb_thread_call_without_gvl(ev_backend_poll, (void *)&poll_args, RUBY_UBF_IO, 0); +#else + backend_poll (EV_A_ waittime); +#endif +/* +############################# END PATCHERY ############################ +*/ + assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ pipe_write_wanted = 0; /* just an optimisation, no fence needed */ cool.io-1.8.1/Gemfile0000644000004100000410000000013414632135713014411 0ustar www-datawww-datasource "https://rubygems.org" # Specify your gem's dependencies in cool.io.gemspec gemspec cool.io-1.8.1/LICENSE0000644000004100000410000000204314632135713014124 0ustar www-datawww-dataCopyright (c) 2007-10 Tony Arcieri Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. cool.io-1.8.1/ext/0000755000004100000410000000000014632135713013720 5ustar www-datawww-datacool.io-1.8.1/ext/iobuffer/0000755000004100000410000000000014632135713015521 5ustar www-datawww-datacool.io-1.8.1/ext/iobuffer/iobuffer.c0000644000004100000410000004676514632135713017510 0ustar www-datawww-data/* * Copyright (C) 2007-12 Tony Arcieri * You may redistribute this under the terms of the MIT license. * See LICENSE for details */ #include "ruby.h" #include "ruby/io.h" #include #include #include #ifndef _MSC_VER #include #endif #include /* 1 GiB maximum buffer size */ #define MAX_BUFFER_SIZE 0x40000000 /* Macro for retrieving the file descriptor from an FPTR */ #if !HAVE_RB_IO_T_FD #define FPTR_TO_FD(fptr) fileno(fptr->f) #else #define FPTR_TO_FD(fptr) fptr->fd #endif /* Default number of bytes in each node's buffer. 
Should be >= MTU */ #define DEFAULT_NODE_SIZE 16384 static unsigned default_node_size = DEFAULT_NODE_SIZE; struct buffer { unsigned size, node_size; struct buffer_node *head, *tail; struct buffer_node *pool_head, *pool_tail; }; struct buffer_node { unsigned start, end; struct buffer_node *next; unsigned char data[0]; }; static VALUE cIO_Buffer = Qnil; static VALUE IO_Buffer_allocate(VALUE klass); static void IO_Buffer_mark(struct buffer *); static void IO_Buffer_free(struct buffer *); static VALUE IO_Buffer_default_node_size(VALUE klass); static VALUE IO_Buffer_set_default_node_size(VALUE klass, VALUE size); static VALUE IO_Buffer_initialize(int argc, VALUE * argv, VALUE self); static VALUE IO_Buffer_clear(VALUE self); static VALUE IO_Buffer_size(VALUE self); static VALUE IO_Buffer_empty(VALUE self); static VALUE IO_Buffer_append(VALUE self, VALUE data); static VALUE IO_Buffer_prepend(VALUE self, VALUE data); static VALUE IO_Buffer_read(int argc, VALUE * argv, VALUE self); static VALUE IO_Buffer_read_frame(VALUE self, VALUE data, VALUE mark); static VALUE IO_Buffer_to_str(VALUE self); static VALUE IO_Buffer_read_from(VALUE self, VALUE io); static VALUE IO_Buffer_write_to(VALUE self, VALUE io); static struct buffer *buffer_new(void); static void buffer_clear(struct buffer * buf); static void buffer_free(struct buffer * buf); static void buffer_free_pool(struct buffer * buf); static void buffer_prepend(struct buffer * buf, char *str, unsigned len); static void buffer_append(struct buffer * buf, char *str, unsigned len); static void buffer_read(struct buffer * buf, char *str, unsigned len); static int buffer_read_frame(struct buffer * buf, VALUE str, char frame_mark); static void buffer_copy(struct buffer * buf, char *str, unsigned len); static int buffer_read_from(struct buffer * buf, int fd); static int buffer_write_to(struct buffer * buf, int fd); /* * High-performance I/O buffer intended for use in non-blocking programs * * Data is stored as a memory-pooled linked list of equally sized chunks. * Routines are provided for high speed non-blocking reads and writes from * Ruby IO objects. 
*/ void Init_iobuffer_ext() { cIO_Buffer = rb_define_class_under(rb_cIO, "Buffer", rb_cObject); rb_define_alloc_func(cIO_Buffer, IO_Buffer_allocate); rb_define_singleton_method(cIO_Buffer, "default_node_size", IO_Buffer_default_node_size, 0); rb_define_singleton_method(cIO_Buffer, "default_node_size=", IO_Buffer_set_default_node_size, 1); rb_define_method(cIO_Buffer, "initialize", IO_Buffer_initialize, -1); rb_define_method(cIO_Buffer, "clear", IO_Buffer_clear, 0); rb_define_method(cIO_Buffer, "size", IO_Buffer_size, 0); rb_define_method(cIO_Buffer, "empty?", IO_Buffer_empty, 0); rb_define_method(cIO_Buffer, "<<", IO_Buffer_append, 1); rb_define_method(cIO_Buffer, "append", IO_Buffer_append, 1); rb_define_method(cIO_Buffer, "write", IO_Buffer_append, 1); rb_define_method(cIO_Buffer, "prepend", IO_Buffer_prepend, 1); rb_define_method(cIO_Buffer, "read", IO_Buffer_read, -1); rb_define_method(cIO_Buffer, "read_frame", IO_Buffer_read_frame, 2); rb_define_method(cIO_Buffer, "to_str", IO_Buffer_to_str, 0); rb_define_method(cIO_Buffer, "read_from", IO_Buffer_read_from, 1); rb_define_method(cIO_Buffer, "write_to", IO_Buffer_write_to, 1); rb_define_const(cIO_Buffer, "MAX_SIZE", INT2NUM(MAX_BUFFER_SIZE)); } static VALUE IO_Buffer_allocate(VALUE klass) { return Data_Wrap_Struct(klass, IO_Buffer_mark, IO_Buffer_free, buffer_new()); } static void IO_Buffer_mark(struct buffer * buf) { /* Naively discard the memory pool whenever Ruby garbage collects */ buffer_free_pool(buf); } static void IO_Buffer_free(struct buffer * buf) { buffer_free(buf); } /** * call-seq: * IO_Buffer.default_node_size -> 16384 * * Retrieves the current value of the default node size. */ static VALUE IO_Buffer_default_node_size(VALUE klass) { return UINT2NUM(default_node_size); } /* * safely converts node sizes from Ruby numerics to C, raising * ArgumentError or RangeError on invalid sizes */ static unsigned convert_node_size(VALUE size) { if ( rb_funcall(size, rb_intern("<"), 1, INT2NUM(1)) == Qtrue || rb_funcall(size, rb_intern(">"), 1, INT2NUM(MAX_BUFFER_SIZE)) == Qtrue ) rb_raise(rb_eArgError, "invalid buffer size"); return (unsigned) NUM2INT(size); } /** * call-seq: * IO_Buffer.default_node_size = 16384 * * Sets the default node size for calling IO::Buffer.new with no arguments. */ static VALUE IO_Buffer_set_default_node_size(VALUE klass, VALUE size) { default_node_size = convert_node_size(size); return size; } /** * call-seq: * IO_Buffer.new(size = IO::Buffer.default_node_size) -> IO_Buffer * * Create a new IO_Buffer with linked segments of the given size */ static VALUE IO_Buffer_initialize(int argc, VALUE * argv, VALUE self) { VALUE node_size_obj; struct buffer *buf; if (rb_scan_args(argc, argv, "01", &node_size_obj) == 1) { Data_Get_Struct(self, struct buffer, buf); /* * Make sure we're not changing the buffer size after data * has been allocated */ assert(!buf->head); assert(!buf->pool_head); buf->node_size = convert_node_size(node_size_obj); } return Qnil; } /** * call-seq: * IO_Buffer#clear -> nil * * Clear all data from the IO_Buffer */ static VALUE IO_Buffer_clear(VALUE self) { struct buffer *buf; Data_Get_Struct(self, struct buffer, buf); buffer_clear(buf); return Qnil; } /** * call-seq: * IO_Buffer#size -> Integer * * Return the size of the buffer in bytes */ static VALUE IO_Buffer_size(VALUE self) { struct buffer *buf; Data_Get_Struct(self, struct buffer, buf); return INT2NUM(buf->size); } /** * call-seq: * IO_Buffer#empty? -> Boolean * * Is the buffer empty? 
*/ static VALUE IO_Buffer_empty(VALUE self) { struct buffer *buf; Data_Get_Struct(self, struct buffer, buf); return buf->size > 0 ? Qfalse : Qtrue; } /** * call-seq: * IO_Buffer#append(data) -> String * * Append the given data to the end of the buffer */ static VALUE IO_Buffer_append(VALUE self, VALUE data) { struct buffer *buf; Data_Get_Struct(self, struct buffer, buf); /* Is this needed? Never seen anyone else do it... */ data = rb_convert_type(data, T_STRING, "String", "to_str"); buffer_append(buf, RSTRING_PTR(data), RSTRING_LEN(data)); return data; } /** * call-seq: * IO_Buffer#prepend(data) -> String * * Prepend the given data to the beginning of the buffer */ static VALUE IO_Buffer_prepend(VALUE self, VALUE data) { struct buffer *buf; Data_Get_Struct(self, struct buffer, buf); data = rb_convert_type(data, T_STRING, "String", "to_str"); buffer_prepend(buf, RSTRING_PTR(data), RSTRING_LEN(data)); return data; } /** * call-seq: * IO_Buffer#read(length = nil) -> String * * Read the specified amount of data from the buffer. If no value * is given the entire contents of the buffer are returned. Any data * read from the buffer is cleared. * The given length must be greater than 0 or an exception is raised. * If the buffer size is zero then an empty string is returned (regardless of * the given length). */ static VALUE IO_Buffer_read(int argc, VALUE * argv, VALUE self) { VALUE length_obj, str; int length; struct buffer *buf; Data_Get_Struct(self, struct buffer, buf); if (rb_scan_args(argc, argv, "01", &length_obj) == 1) { length = NUM2INT(length_obj); if(length < 1) rb_raise(rb_eArgError, "length must be greater than zero"); if(length > buf->size) length = buf->size; } else length = buf->size; if(buf->size == 0) return rb_str_new2(""); str = rb_str_new(0, length); buffer_read(buf, RSTRING_PTR(str), length); return str; } /** * call-seq: * IO_Buffer#read_frame(str, mark) -> boolean * * Read up to and including the given frame marker (expressed as a * Fixnum 0-255) byte, copying into the supplied string object. If the mark is * not encountered before the end of the buffer, false is returned but data * is still copied into str. True is returned if the end of a frame is reached. * */ static VALUE IO_Buffer_read_frame(VALUE self, VALUE data, VALUE mark) { char mark_c = (char) NUM2INT(mark); struct buffer *buf; Data_Get_Struct(self, struct buffer, buf); if (buffer_read_frame(buf, data, mark_c)) { return Qtrue; } else { return Qfalse; } } /** * call-seq: * IO_Buffer#to_str -> String * * Convert the Buffer to a String. The original buffer is unmodified. */ static VALUE IO_Buffer_to_str(VALUE self) { VALUE str; struct buffer *buf; Data_Get_Struct(self, struct buffer, buf); str = rb_str_new(0, buf->size); buffer_copy(buf, RSTRING_PTR(str), buf->size); return str; } /** * call-seq: * IO_Buffer#read_from(io) -> Integer * * Perform a nonblocking read of the given IO object and fill * the buffer with any data received. The call will read as much * data as it can until the read would block. */ static VALUE IO_Buffer_read_from(VALUE self, VALUE io) { struct buffer *buf; int ret; #if defined(HAVE_RB_IO_T) || defined(HAVE_RB_IO_DESCRIPTOR) rb_io_t *fptr; #else OpenFile *fptr; #endif Data_Get_Struct(self, struct buffer, buf); io = rb_convert_type(io, T_FILE, "IO", "to_io"); GetOpenFile(io, fptr); rb_io_set_nonblock(fptr); #ifdef HAVE_RB_IO_DESCRIPTOR ret = buffer_read_from(buf, rb_io_descriptor(io)); #else ret = buffer_read_from(buf, FPTR_TO_FD(fptr)); #endif return ret == -1 ? 
Qnil : INT2NUM(ret); } /** * call-seq: * IO_Buffer#write_to(io) -> Integer * * Perform a nonblocking write of the buffer to the given IO object. * As much data as possible is written until the call would block. * Any data which is written is removed from the buffer. */ static VALUE IO_Buffer_write_to(VALUE self, VALUE io) { struct buffer *buf; #if defined(HAVE_RB_IO_T) || defined(HAVE_RB_IO_DESCRIPTOR) rb_io_t *fptr; #else OpenFile *fptr; #endif Data_Get_Struct(self, struct buffer, buf); io = rb_convert_type(io, T_FILE, "IO", "to_io"); GetOpenFile(io, fptr); rb_io_set_nonblock(fptr); #ifdef HAVE_RB_IO_DESCRIPTOR return INT2NUM(buffer_write_to(buf, rb_io_descriptor(io))); #else return INT2NUM(buffer_write_to(buf, FPTR_TO_FD(fptr))); #endif } /* * Ruby bindings end here. Below is the actual implementation of * the underlying byte queue ADT */ /* Create a new buffer */ static struct buffer * buffer_new(void) { struct buffer *buf; buf = (struct buffer *) xmalloc(sizeof(struct buffer)); buf->head = buf->tail = buf->pool_head = buf->pool_tail = 0; buf->size = 0; buf->node_size = default_node_size; return buf; } /* Clear all data from a buffer */ static void buffer_clear(struct buffer * buf) { /* Move everything into the buffer pool */ if (!buf->pool_tail) { buf->pool_head = buf->pool_tail = buf->head; } else { buf->pool_tail->next = buf->head; } buf->head = buf->tail = 0; buf->size = 0; } /* Free a buffer */ static void buffer_free(struct buffer * buf) { buffer_clear(buf); buffer_free_pool(buf); free(buf); } /* Free the memory pool */ static void buffer_free_pool(struct buffer * buf) { struct buffer_node *tmp; while (buf->pool_head) { tmp = buf->pool_head; buf->pool_head = tmp->next; free(tmp); } buf->pool_tail = 0; } /* Create a new buffer_node (or pull one from the memory pool) */ static struct buffer_node * buffer_node_new(struct buffer * buf) { struct buffer_node *node; /* Pull from the memory pool if available */ if (buf->pool_head) { node = buf->pool_head; buf->pool_head = node->next; if (node->next) node->next = 0; else buf->pool_tail = 0; } else { node = (struct buffer_node *) xmalloc(sizeof(struct buffer_node) + buf->node_size); node->next = 0; } node->start = node->end = 0; return node; } /* Free a buffer node (i.e. 
return it to the memory pool) */ static void buffer_node_free(struct buffer * buf, struct buffer_node * node) { node->next = buf->pool_head; buf->pool_head = node; if (!buf->pool_tail) { buf->pool_tail = node; } } /* Prepend data to the front of the buffer */ static void buffer_prepend(struct buffer * buf, char *str, unsigned len) { struct buffer_node *node, *tmp; buf->size += len; /* If it fits in the beginning of the head */ if (buf->head && buf->head->start >= len) { buf->head->start -= len; memcpy(buf->head->data + buf->head->start, str, len); } else { node = buffer_node_new(buf); node->next = buf->head; buf->head = node; if (!buf->tail) buf->tail = node; while (len > buf->node_size) { memcpy(node->data, str, buf->node_size); node->end = buf->node_size; tmp = buffer_node_new(buf); tmp->next = node->next; node->next = tmp; if (buf->tail == node) buf->tail = tmp; node = tmp; str += buf->node_size; len -= buf->node_size; } if (len > 0) { memcpy(node->data, str, len); node->end = len; } } } /* Append data to the end of the buffer */ static void buffer_append(struct buffer * buf, char *str, unsigned len) { unsigned nbytes; buf->size += len; /* If it fits in the remaining space in the tail */ if (buf->tail && len <= buf->node_size - buf->tail->end) { memcpy(buf->tail->data + buf->tail->end, str, len); buf->tail->end += len; return; } /* Empty list needs to be initialized */ if (!buf->head) { buf->head = buffer_node_new(buf); buf->tail = buf->head; } /* Build links out of the data */ while (len > 0) { nbytes = buf->node_size - buf->tail->end; if (len < nbytes) nbytes = len; memcpy(buf->tail->data + buf->tail->end, str, nbytes); str += nbytes; len -= nbytes; buf->tail->end += nbytes; if (len > 0) { buf->tail->next = buffer_node_new(buf); buf->tail = buf->tail->next; } } } /* Read data from the buffer (and clear what we've read) */ static void buffer_read(struct buffer * buf, char *str, unsigned len) { unsigned nbytes; struct buffer_node *tmp; while (buf->size > 0 && len > 0) { nbytes = buf->head->end - buf->head->start; if (len < nbytes) nbytes = len; memcpy(str, buf->head->data + buf->head->start, nbytes); str += nbytes; len -= nbytes; buf->head->start += nbytes; buf->size -= nbytes; if (buf->head->start == buf->head->end) { tmp = buf->head; buf->head = tmp->next; buffer_node_free(buf, tmp); if (!buf->head) buf->tail = 0; } } } /* * Read data from the buffer into str until byte frame_mark or empty. 
Bytes * are copied into str and removed; if a complete frame is read, a true value * is returned */ static int buffer_read_frame(struct buffer * buf, VALUE str, char frame_mark) { unsigned nbytes = 0; struct buffer_node *tmp; while (buf->size > 0) { struct buffer_node *head = buf->head; char *loc, *s = head->data + head->start, *e = head->data + head->end; nbytes = e - s; loc = memchr(s, frame_mark, nbytes); if (loc) { nbytes = loc - s + 1; } /* Copy less than everything if we found a frame byte */ rb_str_cat(str, s, nbytes); /* Fixup the buffer pointers to indicate the bytes were consumed */ head->start += nbytes; buf->size -= nbytes; if (head->start == head->end) { buf->head = head->next; buffer_node_free(buf, head); if (!buf->head) buf->tail = 0; } if (loc) { return 1; } } return 0; } /* Copy data from the buffer without clearing it */ static void buffer_copy(struct buffer * buf, char *str, unsigned len) { unsigned nbytes; struct buffer_node *node; node = buf->head; while (node && len > 0) { nbytes = node->end - node->start; if (len < nbytes) nbytes = len; memcpy(str, node->data + node->start, nbytes); str += nbytes; len -= nbytes; if (node->start + nbytes == node->end) node = node->next; } } /* Write data from the buffer to a file descriptor */ static int buffer_write_to(struct buffer * buf, int fd) { int bytes_written, total_bytes_written = 0; struct buffer_node *tmp; while (buf->head) { bytes_written = write(fd, buf->head->data + buf->head->start, buf->head->end - buf->head->start); /* If the write failed... */ if (bytes_written < 0) { if (errno != EAGAIN) rb_sys_fail("write"); return total_bytes_written; } total_bytes_written += bytes_written; buf->size -= bytes_written; /* If the write blocked... */ if (bytes_written < buf->head->end - buf->head->start) { buf->head->start += bytes_written; return total_bytes_written; } /* Otherwise we wrote the whole buffer */ tmp = buf->head; buf->head = tmp->next; buffer_node_free(buf, tmp); if (!buf->head) buf->tail = 0; } return total_bytes_written; } /* Read data from a file descriptor to a buffer */ static int buffer_read_from(struct buffer * buf, int fd) { int bytes_read, total_bytes_read = 0; unsigned nbytes; /* Empty list needs to be initialized */ if (!buf->head) { buf->head = buffer_node_new(buf); buf->tail = buf->head; } do { nbytes = buf->node_size - buf->tail->end; bytes_read = read(fd, buf->tail->data + buf->tail->end, nbytes); if (bytes_read == 0) { return -1; /* the file reached EOF */ } else if (bytes_read < 0) { if (errno != EAGAIN) rb_sys_fail("read"); return total_bytes_read; } total_bytes_read += bytes_read; buf->tail->end += bytes_read; buf->size += bytes_read; if (buf->tail->end == buf->node_size) { buf->tail->next = buffer_node_new(buf); buf->tail = buf->tail->next; } } while (bytes_read == nbytes); return total_bytes_read; } cool.io-1.8.1/ext/iobuffer/extconf.rb0000644000004100000410000000034214632135713017513 0ustar www-datawww-datarequire 'mkmf' dir_config("iobuffer") have_func("rb_io_descriptor") have_library("c", "main") if have_macro("HAVE_RB_IO_T", "ruby/io.h") have_struct_member("rb_io_t", "fd", "ruby/io.h") end create_makefile("iobuffer_ext") cool.io-1.8.1/ext/libev/0000755000004100000410000000000014632135713015021 5ustar www-datawww-datacool.io-1.8.1/ext/libev/README.embed0000644000004100000410000000014714632135713016756 0ustar www-datawww-dataThis file is now included in the main libev documentation, see http://cvs.schmorp.de/libev/ev.html 
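For orientation, here is a minimal usage sketch of the IO::Buffer API defined by the extension above. It exercises only methods registered in Init_iobuffer_ext (<</append, prepend, read_frame, write_to) plus the standard library's Tempfile; it assumes an installed gem (require "cool.io" pulls in the compiled iobuffer_ext), and the tempfile name is illustrative only. It is a sketch for the reader, not a file shipped in the gem:

  require "cool.io"    # loads the compiled iobuffer_ext extension
  require "tempfile"

  buf = IO::Buffer.new             # node size defaults to IO::Buffer.default_node_size
  buf << "alpha\n"                 # append to the tail (IO_Buffer_append)
  buf.prepend ">> "                # splice data in front of the head
  buf.append "beta\n"

  frame = ""
  buf.read_frame(frame, "\n".ord)  # consume up to and including the first newline
  frame                            # => ">> alpha\n"; "beta\n" stays buffered

  tmp = Tempfile.new("iobuffer_demo")  # scratch file; name is hypothetical
  buf.write_to(tmp)                # nonblocking write; drains the bytes it writes
  tmp.rewind
  tmp.read                         # => "beta\n"
  tmp.close!

read_from is the mirror image: it fills the buffer with nonblocking reads until the underlying read would block, returning the number of bytes read, or nil once the IO reaches EOF (see IO_Buffer_read_from above).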
cool.io-1.8.1/ext/libev/ev_epoll.c0000644000004100000410000002321614632135713016776 0ustar www-datawww-data/* * libev epoll fd activity backend * * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ /* * general notes about epoll: * * a) epoll silently removes fds from the fd set. as nothing tells us * that an fd has been removed otherwise, we have to continually * "rearm" fds that we suspect *might* have changed (same * problem with kqueue, but much less costly there). * b) the fact that ADD != MOD creates a lot of extra syscalls due to a) * and seems not to have any advantage. * c) the inability to handle fork or file descriptors (think dup) * limits the applicability over poll, so this is not a generic * poll replacement. * d) epoll doesn't work the same as select with many file descriptors * (such as files). while not critical, no other advanced interface * seems to share this (rather non-unixy) limitation. * e) epoll claims to be embeddable, but in practise you never get * a ready event for the epoll fd (broken: <=2.6.26, working: >=2.6.32). * f) epoll_ctl returning EPERM means the fd is always ready. * * lots of "weird code" and complication handling in this file is due * to these design problems with epoll, as we try very hard to avoid * epoll_ctl syscalls for common usage patterns and handle the breakage * ensuing from receiving events for closed and otherwise long gone * file descriptors. 
*/ #include #define EV_EMASK_EPERM 0x80 static void epoll_modify (EV_P_ int fd, int oev, int nev) { struct epoll_event ev; unsigned char oldmask; /* * we handle EPOLL_CTL_DEL by ignoring it here * on the assumption that the fd is gone anyways * if that is wrong, we have to handle the spurious * event in epoll_poll. * if the fd is added again, we try to ADD it, and, if that * fails, we assume it still has the same eventmask. */ if (!nev) return; oldmask = anfds [fd].emask; anfds [fd].emask = nev; /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */ ev.data.u64 = (uint64_t)(uint32_t)fd | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); ev.events = (nev & EV_READ ? EPOLLIN : 0) | (nev & EV_WRITE ? EPOLLOUT : 0); if (expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) return; if (expect_true (errno == ENOENT)) { /* if ENOENT then the fd went away, so try to do the right thing */ if (!nev) goto dec_egen; if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) return; } else if (expect_true (errno == EEXIST)) { /* EEXIST means we ignored a previous DEL, but the fd is still active */ /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ if (oldmask == nev) goto dec_egen; if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) return; } else if (expect_true (errno == EPERM)) { /* EPERM means the fd is always ready, but epoll is too snobbish */ /* to handle it, unlike select or poll. */ anfds [fd].emask = EV_EMASK_EPERM; /* add fd to epoll_eperms, if not already inside */ if (!(oldmask & EV_EMASK_EPERM)) { array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, EMPTY2); epoll_eperms [epoll_epermcnt++] = fd; } return; } fd_kill (EV_A_ fd); dec_egen: /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ --anfds [fd].egen; } static void epoll_poll (EV_P_ ev_tstamp timeout) { int i; int eventcnt; if (expect_false (epoll_epermcnt)) timeout = 0.; /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */ /* the default libev max wait time, however. */ EV_RELEASE_CB; eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, timeout * 1e3); EV_ACQUIRE_CB; if (expect_false (eventcnt < 0)) { if (errno != EINTR) ev_syserr ("(libev) epoll_wait"); return; } for (i = 0; i < eventcnt; ++i) { struct epoll_event *ev = epoll_events + i; int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ int want = anfds [fd].events; int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); /* * check for spurious notification. * this only finds spurious notifications on egen updates * other spurious notifications will be found by epoll_ctl, below * we assume that fd is always in range, as we never shrink the anfds array */ if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) { /* recreate kernel state */ postfork |= 2; continue; } if (expect_false (got & ~want)) { anfds [fd].emask = want; /* * we received an event but are not interested in it, try mod or del * this often happens because we optimistically do not unregister fds * when we are no longer interested in them, but also when we get spurious * notifications for fds from another process. 
this is partially handled * above with the gencounter check (== our fd is not the event fd), and * partially here, when epoll_ctl returns an error (== a child has the fd * but we closed it). */ ev->events = (want & EV_READ ? EPOLLIN : 0) | (want & EV_WRITE ? EPOLLOUT : 0); /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */ /* which is fortunately easy to do for us. */ if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) { postfork |= 2; /* an error occurred, recreate kernel state */ continue; } } fd_event (EV_A_ fd, got); } /* if the receive array was full, increase its size */ if (expect_false (eventcnt == epoll_eventmax)) { ev_free (epoll_events); epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); } /* now synthesize events for all fds where epoll fails, while select works... */ for (i = epoll_epermcnt; i--; ) { int fd = epoll_eperms [i]; unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE); if (anfds [fd].emask & EV_EMASK_EPERM && events) fd_event (EV_A_ fd, events); else { epoll_eperms [i] = epoll_eperms [--epoll_epermcnt]; anfds [fd].emask = 0; } } } inline_size int epoll_init (EV_P_ int flags) { #ifdef EPOLL_CLOEXEC backend_fd = epoll_create1 (EPOLL_CLOEXEC); if (backend_fd < 0 && (errno == EINVAL || errno == ENOSYS)) #endif backend_fd = epoll_create (256); if (backend_fd < 0) return 0; fcntl (backend_fd, F_SETFD, FD_CLOEXEC); backend_mintime = 1e-3; /* epoll does sometimes return early, this is just to avoid the worst */ backend_modify = epoll_modify; backend_poll = epoll_poll; epoll_eventmax = 64; /* initial number of events receivable per poll */ epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); return EVBACKEND_EPOLL; } inline_size void epoll_destroy (EV_P) { ev_free (epoll_events); array_free (epoll_eperm, EMPTY); } inline_size void epoll_fork (EV_P) { close (backend_fd); while ((backend_fd = epoll_create (256)) < 0) ev_syserr ("(libev) epoll_create"); fcntl (backend_fd, F_SETFD, FD_CLOEXEC); fd_rearm_all (EV_A); } cool.io-1.8.1/ext/libev/Changes0000644000004100000410000006550314632135713016325 0ustar www-datawww-dataRevision history for libev, a high-performance and full-featured event loop. 4.24 Wed Dec 28 05:19:55 CET 2016 - bump version to 4.24, as the release tarball inexplicably didn't have the right version in ev.h, even though the cvs-tagged version did have the right one (reported by Ales Teska). 4.23 Wed Nov 16 18:23:41 CET 2016 - move some declarations at the beginning to help certain retarded microsoft compilers, even though their documentation claims otherwise (reported by Ruslan Osmanov). 4.22 Sun Dec 20 22:11:50 CET 2015 - when epoll detects unremovable fds in the fd set, rebuild only the epoll descriptor, not the signal pipe, to avoid SIGPIPE in ev_async_send. This doesn't solve it on fork, so document what needs to be done in ev_loop_fork (analyzed by Benjamin Mahler). - remove superfluous sys/timeb.h include on win32 (analyzed by Jason Madden). - updated libecb. 4.20 Sat Jun 20 13:01:43 CEST 2015 - prefer noexcept over throw () with C++ 11. - update ecb.h due to incompatibilities with c11. - fix a potential aliasing issue when reading and writing watcher callbacks. 4.19 Thu Sep 25 08:18:25 CEST 2014 - ev.h wasn't valid C++ anymore, which tripped compilers other than clang, msvc or gcc (analyzed by Raphael 'kena' Poss). 
Unfortunately, C++ doesn't support typedefs for function pointers fully, so the affected declarations have to spell out the types each time. - when not using autoconf, tighten the check for clock_gettime and related functionality. 4.18 Fri Sep 5 17:55:26 CEST 2014 - events on files were not always generated properly with the epoll backend (testcase by Assaf Inbal). - mark event pipe fd as cloexec after a fork (analyzed by Sami Farin). - (ecb) support m68k, m88k and sh (patch by Miod Vallat). - use a reasonable fallback for EV_NSIG instead of erroring out when we can't detect the signal set size. - in the absence of autoconf, do not use the clock syscall on glibc >= 2.17 (avoids the syscall AND -lrt on systems doing clock_gettime in userspace). - ensure extern "C" function pointers are used for externally-visible loop callbacks (not watcher callbacks yet). - (ecb) work around memory barriers and volatile apparently both being broken in visual studio 2008 and later (analysed and patch by Nicolas Noble). 4.15 Fri Mar 1 12:04:50 CET 2013 - destroying a non-default loop would stop the global waitpid watcher (Denis Bilenko). - queueing pending watchers of higher priority from a watcher now invokes them in a timely fashion (reported by Denis Bilenko). - add throw() to all libev functions that cannot throw exceptions, for further code size decrease when compiling for C++. - add throw () to callbacks that must not throw exceptions (allocator, syserr, loop acquire/release, periodic reschedule cbs). - fix event_base_loop return code, add event_get_callback, event_base_new, event_base_get_method calls to improve libevent 1.x emulation and add some libevent 2.x functionality (based on a patch by Jeff Davey). - add more memory fences to fix a bug reported by Jeff Davey. Better be overfenced than underprotected. - ev_run now returns a boolean status (true meaning watchers are still active). - ev_once: undef EV_ERROR in ev_kqueue.c, to avoid clashing with libev's EV_ERROR (reported by 191919). - (ecb) add memory fence support for xlC (Darin McBride). - (ecb) add memory fence support for gcc-mips (Anton Kirilov). - (ecb) add memory fence support for gcc-alpha (Christian Weisgerber). - work around some kernels losing file descriptors by leaking the kqueue descriptor in the child. - work around linux inotify not reporting IN_ATTRIB changes for directories in many cases. - include sys/syscall.h instead of plain syscall.h. - check for io watcher loops in ev_verify, check for the most common reported usage bug in ev_io_start. - choose socket vs. WSASocket at compiletime using EV_USE_WSASOCKET. - always use WSASend/WSARecv directly on windows, hoping that this works in all cases (unlike read/write/send/recv...). - try to detect signals around a fork faster (test program by Denis Bilenko). - work around recent glibc versions that leak memory in realloc. - rename ev::embed::set to ev::embed::set_embed to avoid clashing the watcher base set (loop) method. - rewrite the async/signal pipe logic to always keep a valid fd, which simplifies (and hopefully correctifies :) the race checking on fork, at the cost of one extra fd. - add fat, msdos, jffs2, ramfs, ntfs and btrfs to the list of inotify-supporting filesystems. - move orig_CFLAGS assignment to after AC_INIT, as newer autoconf versions ignore it before (https://bugzilla.redhat.com/show_bug.cgi?id=908096). - add some untested android support. - enum expressions must be of type int (reported by Juan Pablo L). 
4.11 Sat Feb 4 19:52:39 CET 2012 - INCOMPATIBLE CHANGE: ev_timer_again now clears the pending status, as was documented already, but not implemented in the repeating case. - new compiletime symbols: EV_NO_SMP and EV_NO_THREADS. - fix a race where the workaround against the epoll fork bugs caused signals to not be handled anymore. - correct backend_fudge for most backends, and implement a windows specific workaround to avoid looping because we call both select and Sleep, both with different time resolutions. - document range and guarantees of ev_sleep. - document reasonable ranges for periodics interval and offset. - rename backend_fudge to backend_mintime to avoid future confusion :) - change the default periodic reschedule function to hopefully be more exact and correct even in corner cases or in the far future. - do not rely on -lm anymore: use it when available but use our own floor () if it is missing. This should make it easier to embed, as no external libraries are required. - strategically import macros from libecb and mark rarely-used functions as cache-cold (saving almost 2k code size on typical amd64 setups). - add Symbols.ev and Symbols.event files, that were missing. - fix backend_mintime value for epoll (was 1/1024, is 1/1000 now). - fix #3 "be smart about timeouts" to not "deadlock" when timeout == now, also improve the section overall. - avoid "AVOIDING FINISHING BEFORE RETURNING" idiom. - support new EV_API_STATIC mode to make all libev symbols static. - supply default CFLAGS of -g -O3 with gcc when original CFLAGS were empty. 4.04 Wed Feb 16 09:01:51 CET 2011 - fix two problems in the native win32 backend, where reuse of fd's with different underlying handles caused handles not to be removed or added to the select set (analyzed and tested by Bert Belder). - do no rely on ceil() in ev_e?poll.c. - backport libev to HP-UX versions before 11 v3. - configure did not detect nanosleep and clock_gettime properly when they are available in the libc (as opposed to -lrt). 4.03 Tue Jan 11 14:37:25 CET 2011 - officially support polling files with all backends. - support files, /dev/zero etc. the same way as select in the epoll backend, by generating events on our own. - ports backend: work around solaris bug 6874410 and many related ones (EINTR, maybe more), with no performance loss (note that the solaris bug report is actually wrong, reality is far more bizarre and broken than that). - define EV_READ/EV_WRITE as macros in event.h, as some programs use #ifdef to test for them. - new (experimental) function: ev_feed_signal. - new (to become default) EVFLAG_NOSIGMASK flag. - new EVBACKEND_MASK symbol. - updated COMMON IDIOMS SECTION. 4.01 Fri Nov 5 21:51:29 CET 2010 - automake fucked it up, apparently, --add-missing -f is not quite enough to make it update its files, so 4.00 didn't install ev++.h and event.h on make install. grrr. - ev_loop(count|depth) didn't return anything (Robin Haberkorn). - change EV_UNDEF to 0xffffffff to silence some overzealous compilers. - use "(libev) " prefix for all libev error messages now. 4.00 Mon Oct 25 12:32:12 CEST 2010 - "PORTING FROM LIBEV 3.X TO 4.X" (in ev.pod) is recommended reading. - ev_embed_stop did not correctly stop the watcher (very good testcase by Vladimir Timofeev). - ev_run will now always update the current loop time - it erroneously didn't when idle watchers were active, causing timers not to fire. - fix a bug where a timeout of zero caused the timer not to fire in the libevent emulation (testcase by Péter Szabó). 
- applied win32 fixes by Michael Lenaghan (also James Mansion). - replace EV_MINIMAL by EV_FEATURES. - prefer EPOLL_CTL_ADD over EPOLL_CTL_MOD in some more cases, as it seems the former is *much* faster than the latter. - linux kernel version detection (for inotify bug workarounds) did not work properly. - reduce the number of spurious wake-ups with the ports backend. - remove dependency on sys/queue.h on freebsd (patch by Vanilla Hsu). - do async init within ev_async_start, not ev_async_set, which avoids an API quirk where the set function must be called in the C++ API even when there is nothing to set. - add (undocumented) EV_ENABLE when adding events with kqueue, this might help with OS X, which seems to need it despite documenting not to need it (helpfully pointed out by Tilghman Lesher). - do not use poll by default on freebsd, it's broken (what isn't on freebsd...). - allow to embed epoll on kernels >= 2.6.32. - configure now prepends -O3, not appends it, so one can still override it. - ev.pod: greatly expanded the portability section, added a porting section, a description of watcher states and made lots of minor fixes. - disable poll backend on AIX, the poll header spams the namespace and it's not worth working around dead platforms (reported and analyzed by Aivars Kalvans). - improve header file compatibility of the standalone eventfd code in an obscure case. - implement EV_AVOID_STDIO option. - do not use sscanf to parse linux version number (smaller, faster, no sscanf dependency). - new EV_CHILD_ENABLE and EV_SIGNAL_ENABLE configurable settings. - update libev.m4 HAVE_CLOCK_SYSCALL test for newer glibcs. - add section on accept() problems to the manpage. - rename EV_TIMEOUT to EV_TIMER. - rename ev_loop_count/depth/verify/loop/unloop. - remove ev_default_destroy and ev_default_fork. - switch to two-digit minor version. - work around an apparent gentoo compiler bug. - define _DARWIN_UNLIMITED_SELECT. just so. - use enum instead of #define for most constants. - improve compatibility to older C++ compilers. - (experimental) ev_run/ev_default_loop/ev_break/ev_loop_new have now default arguments when compiled as C++. - enable automake dependency tracking. - ev_loop_new no longer leaks memory when loop creation failed. - new ev_cleanup watcher type. 3.9 Thu Dec 31 07:59:59 CET 2009 - signalfd is no longer used by default and has to be requested explicitly - this means that easy to catch bugs become hard to catch race conditions, but the users have spoken. - point out the unspecified signal mask in the documentation, and that this is a race condition regardless of EV_SIGNALFD. - backport inotify code to C89. - inotify file descriptors could leak into child processes. - ev_stat watchers could keep an erroneous extra ref on the loop, preventing exit when unregistering all watchers (testcases provided by ry@tinyclouds.org). - implement EV_WIN32_HANDLE_TO_FD and EV_WIN32_CLOSE_FD configuration symbols to make it easier for apps to do their own fd management. - support EV_IDLE_ENABLE being disabled in ev++.h (patch by Didier Spezia). - take advantage of inotify_init1, if available, to set cloexec/nonblock on fd creation, to avoid races. - the signal handling pipe wasn't always initialised under windows (analysed by lekma). - changed minimum glibc requirement from glibc 2.9 to 2.7, for signalfd. - add missing string.h include (Denis F. Latypoff). - only replace ev_stat.prev when we detect an actual difference, so prev is (almost) always different to attr. 
this might have caused the problems with 04_stat.t. - add ev::timer->remaining () method to C++ API. 3.8 Sun Aug 9 14:30:45 CEST 2009 - incompatible change: do not necessarily reset signal handler to SIG_DFL when a sighandler is stopped. - ev_default_destroy did not properly free or zero some members, potentially causing crashes and memory corruption on repeated ev_default_destroy/ev_default_loop calls. - take advantage of signalfd on GNU/Linux systems. - document that the signal mask might be in an unspecified state when using libev's signal handling. - take advantage of some GNU/Linux calls to set cloexec/nonblock on fd creation, to avoid race conditions. 3.7 Fri Jul 17 16:36:32 CEST 2009 - ev_unloop and ev_loop wrongly used a global variable to exit loops, instead of using a per-loop variable (bug caught by accident...). - the ev_set_io_collect_interval interpretation has changed. - add new functionality: ev_set_userdata, ev_userdata, ev_set_invoke_pending_cb, ev_set_loop_release_cb, ev_invoke_pending, ev_pending_count, together with a long example about thread locking. - add ev_timer_remaining (as requested by Denis F. Latypoff). - add ev_loop_depth. - calling ev_unloop in fork/prepare watchers will no longer poll for new events. - Denis F. Latypoff corrected many typos in example code snippets. - honor autoconf detection of EV_USE_CLOCK_SYSCALL, also double- check that the syscall number is available before trying to use it (reported by ry@tinyclouds). - use GetSystemTimeAsFileTime instead of _timeb on windows, for slightly higher accuracy. - properly declare ev_loop_verify and ev_now_update even when !EV_MULTIPLICITY. - do not compile in any priority code when EV_MAXPRI == EV_MINPRI. - support EV_MINIMAL==2 for a reduced API. - actually 0-initialise struct sigaction when installing signals. - add section on hibernate and stopped processes to ev_timer docs. 3.6 Tue Apr 28 02:49:30 CEST 2009 - multiple timers becoming ready within an event loop iteration will be invoked in the "correct" order now. - do not leave the event loop early just because we have no active watchers, fixing a problem when embedding a kqueue loop that has active kernel events but no registered watchers (reported by blacksand blacksand). - correctly zero the idx values for arrays, so destroying and reinitialising the default loop actually works (patch by Malek Hadj-Ali). - implement ev_suspend and ev_resume. - new EV_CUSTOM revents flag for use by applications. - add documentation section about priorities. - add a glossary to the documentation. - extend the ev_fork description slightly. - optimize a jump out of call_pending. 3.53 Sun Feb 15 02:38:20 CET 2009 - fix a bug in event pipe creation on win32 that would cause a failed assertion on event loop creation (patch by Malek Hadj-Ali). - probe for CLOCK_REALTIME support at runtime as well and fall back to gettimeofday if there is an error, to support older operating systems with newer header files/libraries. - prefer gettimeofday over clock_gettime with USE_CLOCK_SYSCALL (default most everywhere), otherwise not. 3.52 Wed Jan 7 21:43:02 CET 2009 - fix compilation of select backend in fd_set mode when NFDBITS is missing (to get it to compile on QNX, reported by Rodrigo Campos). - better select-nfds handling when select backend is in fd_set mode. - diagnose fd_set overruns when select backend is in fd_set mode. - due to a thinko, instead of disabling everything but select on the borked OS X platform, everything but select was allowed (reported by Emanuele Giaquinta). 
- actually verify that local and remote port are matching in libev's socketpair emulation, which makes denial-of-service attacks harder (but not impossible - it's windows). Make sure it even works under vista, which thinks that getpeer/sockname should return fantasy port numbers. - include "libev" in all assertion messages for potentially clearer diagnostics. - event_get_version (libevent compatibility) returned a useless string instead of the expected version string (patch by W.C.A. Wijngaards). 3.51 Wed Dec 24 23:00:11 CET 2008 - fix a bug where an inotify watcher was added twice, causing freezes on hash collisions (reported and analysed by Graham Leggett). - new config symbol, EV_USE_CLOCK_SYSCALL, to make libev use a direct syscall - slower, but no dependency on librt et al. - assume negative return values != -1 signals success of port_getn (http://cvs.epicsol.org/cgi/viewcvs.cgi/epic5/source/newio.c?rev=1.52) (no known failure reports, but it doesn't hurt). - fork detection in ev_embed now stops and restarts the watcher automatically. - EXPERIMENTAL: default the method to operator () in ev++.h, to make it nicer to use functors (requested by Benedek László). - fixed const object callbacks in ev++.h. - replaced loop_ref argument of watcher.set (loop) by a direct ev_loop * in ev++.h, to avoid clashes with functor patch. - do not try to watch the empty string via inotify. - inotify watchers could be leaked under certain circumstances. - OS X 10.5 is actually even more broken than earlier versions, so fall back to select on that piece of garbage. - fixed some weirdness in the ev_embed documentation. 3.49 Wed Nov 19 11:26:53 CET 2008 - ev_stat watchers will now use inotify as a mere hint on kernels <2.6.25, or if the filesystem is not in the "known to be good" list. - better mingw32 compatibility (it's not as borked as native win32) (analysed by Roger Pack). - include stdio.h in the example program, as too many people are confused by the weird C language otherwise. I guess the next thing I get told is that the "..." ellipses in the examples don't compile with their C compiler. 3.48 Thu Oct 30 09:02:37 CET 2008 - further optimise away the EPOLL_CTL_ADD/MOD combo in the epoll backend by assuming the kernel event mask hasn't changed if ADD fails with EEXIST. - work around spurious event notification bugs in epoll by using a 32-bit generation counter. recreate kernel state if we receive spurious notifications or unwanted events. this is very costly, but I didn't come up with this horrible design. - use memset to initialise most arrays now and do away with the init functions. - expand time-out strategies into a "Be smart about timeouts" section. - drop the "struct" from all ev_watcher declarations in the documentation and did other clarifications (yeah, it was a mistake to have a struct AND a function called ev_loop). - fix a bug where ev_default would not initialise the default loop again after it was destroyed with ev_default_destroy. - rename syserr to ev_syserr to avoid name clashes when embedding, do similar changes for event.c. 3.45 Tue Oct 21 21:59:26 CEST 2008 - disable inotify usage on linux <2.6.25, as it is broken (reported by Yoann Vandoorselaere). - ev_stat erroneously would try to add inotify watchers even when inotify wasn't available (this should only have a performance impact). - ev_once now passes both timeout and io to the callback if both occur concurrently, instead of giving timeouts precedence. - disable EV_USE_INOTIFY when sys/inotify.h is too old. 
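The 3.45 ev_once change above is worth illustrating: because the I/O and timeout events can now be delivered in a single callback invocation, callers should test each bit of revents rather than treat them as mutually exclusive. A minimal sketch against the ev.h API shipped in this tree (not upstream code; the fd value and 10-second timeout are arbitrary assumptions):

#include <ev.h>

static void
once_cb (int revents, void *arg)
{
  if (revents & EV_READ)
    { /* the watched fd became readable */ }

  if (revents & EV_TIMER)
    { /* the timeout elapsed - possibly in the same invocation as EV_READ */ }
}

static void
wait_for_input (void)
{
  /* watch fd 0 for readability, but give up after 10 seconds */
  ev_once (EV_DEFAULT_ 0, EV_READ, 10., once_cb, 0);
}
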
3.44 Mon Sep 29 05:18:39 CEST 2008 - embed watchers now automatically invoke ev_loop_fork on the embedded loop when the parent loop forks. - new function: ev_now_update (loop). - verify_watcher was not marked static. - improve the "associating..." manpage section. - documentation tweaks here and there. 3.43 Sun Jul 6 05:34:41 CEST 2008 - include more include files on windows to get struct _stati64 (reported by Chris Hulbert, but doesn't quite fix his issue). - add missing #include in ev.c on windows (reported by Matt Tolton). 3.42 Tue Jun 17 12:12:07 CEST 2008 - work around yet another windows bug: FD_SET actually adds fd's multiple times to the fd_*SET*, despite official MSN docs claiming otherwise. Reported and well-analysed by Matt Tolton. - define NFDBITS to 0 when EV_SELECT_IS_WINSOCKET to make it compile (reported and analysed by Chris Hulbert). - fix a bug in ev_ebadf (this function is only used to catch programming errors in the libev user). reported by Matt Tolton. - fix a bug in fd_intern on win32 (could lead to compile errors under some circumstances, but would work correctly if it compiles). reported by Matt Tolton. - (try to) work around missing lstat on windows. - pass in the write fd set as except fd set under windows. windows is so uncontrollably lame that it requires this. this means that switching off oobinline is not supported (but tcp/ip doesn't have oob, so that would be stupid anyways). - use posix module symbol to auto-detect monotonic clock presence and some other default values. 3.41 Fri May 23 18:42:54 CEST 2008 - work around an obscure bug in winsocket select: if you provide only empty fd sets then select returns WSAEINVAL. how sucky. - improve timer scheduling stability and reduce use of time_epsilon. - use 1-based 2-heap for EV_MINIMAL, simplifies code, reduces codesize and makes for better cache-efficiency. - use 3-based 4-heap for !EV_MINIMAL. this makes better use of cpu cache lines and gives better growth behaviour than 2-based heaps. - cache timestamp within heap for !EV_MINIMAL, to avoid random memory accesses. - document/add EV_USE_4HEAP and EV_HEAP_CACHE_AT. - fix a potential aliasing issue in ev_timer_again. - add/document ev_periodic_at, retract direct access to ->at. - improve ev_stat docs. - add portability requirements section. - fix manpage headers etc. - normalise WSA error codes to lower range on windows. - add consistency check code that can be called automatically or on demand to check for internal structures (ev_loop_verify). 3.31 Wed Apr 16 20:45:04 CEST 2008 - added last minute fix for ev_poll.c by Brandon Black. 3.3 Wed Apr 16 19:04:10 CEST 2008 - event_base_loopexit should return 0 on success (W.C.A. Wijngaards). - added linux eventfd support. - try to autodetect epoll and inotify support by libc header version if not using autoconf. - new symbols: EV_DEFAULT_UC and EV_DEFAULT_UC_. - declare functions defined in ev.h as inline if C99 or gcc are available. - enable inlining with gcc versions 2 and 3. - work around broken poll implementations potentially not clearing revents field in ev_poll (Brandon Black) (no such systems are known at this time). - work around a bug in realloc on openbsd and darwin, also makes the erroneous valgrind complaints go away (noted by various people). - fix ev_async_pending, add c++ wrapper for ev_async (based on patch sent by Johannes Deisenhofer). - add sensible set method to ev::embed. - made integer constants type int in ev.h. 
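The "Be smart about timeouts" section referenced in 3.48 above (and fixed again in 4.11) revolves around one idiom: instead of stopping, re-setting and restarting a timer on every bit of activity, set its repeat value once and call ev_timer_again, which re-arms the watcher without churning the timer heap. A hedged sketch of that idiom (the 60-second value and function names are placeholders, not upstream code):

#include <ev.h>

static ev_timer timeout_watcher;

static void
timeout_cb (EV_P_ ev_timer *w, int revents)
{
  /* 60 seconds passed without activity */
}

/* call once at setup time; ev_timer_again starts an inactive
 * repeating timer */
static void
timeout_setup (EV_P)
{
  ev_timer_init (&timeout_watcher, timeout_cb, 0., 60.);
  ev_timer_again (EV_A_ &timeout_watcher);
}

/* call on every bit of activity; cheap, as it only moves the timer */
static void
timeout_reset (EV_P)
{
  ev_timer_again (EV_A_ &timeout_watcher);
}
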
3.2 Wed Apr 2 17:11:19 CEST 2008 - fix a 64 bit overflow issue in the select backend, by using fd_mask instead of int for the mask. - rename internal sighandler to avoid clash with very old perls. - entering ev_loop will not clear the ONESHOT or NONBLOCKING flags of any outer loops anymore. - add ev_async_pending. 3.1 Thu Mar 13 13:45:22 CET 2008 - implement ev_async watchers. - only initialise signal pipe on demand. - make use of sig_atomic_t configurable. - improved documentation. 3.0 Mon Jan 28 13:14:47 CET 2008 - API/ABI bump to version 3.0. - ev++.h includes "ev.h" by default now, not <ev.h>. - slightly improved documentation. - speed up signal detection after a fork. - only optionally return trace status changed in ev_child watchers. - experimental (and undocumented) loop wrappers for ev++.h. 2.01 Tue Dec 25 08:04:41 CET 2007 - separate Changes file. - fix ev_path_set => ev_stat_set typo. - remove event_compat.h from the libev tarball. - change how include files are found. - doc updates. - update licenses, explicitly allow for GPL relicensing. 2.0 Sat Dec 22 17:47:03 CET 2007 - new ev_sleep, ev_set_(io|timeout)_collect_interval. - removed epoll from embeddable fd set. - fix embed watchers. - renamed ev_embed.loop to other. - added exported Symbol tables. - undefine member wrapper macros at the end of ev.c. - respect EV_H in ev++.h. 1.86 Tue Dec 18 02:36:57 CET 2007 - fix memleak on loop destroy (not relevant for perl). 1.85 Fri Dec 14 20:32:40 CET 2007 - fix some aliasing issues w.r.t. timers and periodics (not relevant for perl). (for historic versions refer to EV/Changes, found in the Perl interface) 0.1 Wed Oct 31 21:31:48 CET 2007 - original version; hacked together in <24h. cool.io-1.8.1/ext/libev/ev_win32.c0000644000004100000410000001231014632135713016620 0ustar www-datawww-data/* * libev win32 compatibility cruft (_not_ a backend) * * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. 
If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifdef _WIN32 /* note: the comment below could not be substantiated, but what would I care */ /* MSDN says this is required to handle SIGFPE */ /* my wild guess would be that using something floating-pointy is required */ /* for the crt to do something about it */ volatile double SIGFPE_REQ = 0.0f; static SOCKET ev_tcp_socket (void) { #if EV_USE_WSASOCKET return WSASocket (AF_INET, SOCK_STREAM, 0, 0, 0, 0); #else return socket (AF_INET, SOCK_STREAM, 0); #endif } /* oh, the humanity! */ static int ev_pipe (int filedes [2]) { struct sockaddr_in addr = { 0 }; int addr_size = sizeof (addr); struct sockaddr_in adr2; int adr2_size = sizeof (adr2); SOCKET listener; SOCKET sock [2] = { -1, -1 }; if ((listener = ev_tcp_socket ()) == INVALID_SOCKET) return -1; addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK); addr.sin_port = 0; if (bind (listener, (struct sockaddr *)&addr, addr_size)) goto fail; if (getsockname (listener, (struct sockaddr *)&addr, &addr_size)) goto fail; if (listen (listener, 1)) goto fail; if ((sock [0] = ev_tcp_socket ()) == INVALID_SOCKET) goto fail; if (connect (sock [0], (struct sockaddr *)&addr, addr_size)) goto fail; /* TODO: returns INVALID_SOCKET on winsock accept, not < 0. fix it */ /* when convenient, probably by just removing error checking altogether? */ if ((sock [1] = accept (listener, 0, 0)) < 0) goto fail; /* windows vista returns fantasy port numbers for sockets: * example for two interconnected tcp sockets: * * (Socket::unpack_sockaddr_in getsockname $sock0)[0] == 53364 * (Socket::unpack_sockaddr_in getpeername $sock0)[0] == 53363 * (Socket::unpack_sockaddr_in getsockname $sock1)[0] == 53363 * (Socket::unpack_sockaddr_in getpeername $sock1)[0] == 53365 * * wow! tridirectional sockets! * * this way of checking ports seems to work: */ if (getpeername (sock [0], (struct sockaddr *)&addr, &addr_size)) goto fail; if (getsockname (sock [1], (struct sockaddr *)&adr2, &adr2_size)) goto fail; errno = WSAEINVAL; if (addr_size != adr2_size || addr.sin_addr.s_addr != adr2.sin_addr.s_addr /* just to be sure, I mean, it's windows */ || addr.sin_port != adr2.sin_port) goto fail; closesocket (listener); #if EV_SELECT_IS_WINSOCKET filedes [0] = EV_WIN32_HANDLE_TO_FD (sock [0]); filedes [1] = EV_WIN32_HANDLE_TO_FD (sock [1]); #else /* when select isn't winsocket, we also expect socket, connect, accept etc. * to work on fds */ filedes [0] = sock [0]; filedes [1] = sock [1]; #endif return 0; fail: closesocket (listener); if (sock [0] != INVALID_SOCKET) closesocket (sock [0]); if (sock [1] != INVALID_SOCKET) closesocket (sock [1]); return -1; } #undef pipe #define pipe(filedes) ev_pipe (filedes) #define EV_HAVE_EV_TIME 1 ev_tstamp ev_time (void) { FILETIME ft; ULARGE_INTEGER ui; GetSystemTimeAsFileTime (&ft); ui.u.LowPart = ft.dwLowDateTime; ui.u.HighPart = ft.dwHighDateTime; /* msvc cannot convert ulonglong to double... 
yes, it is that sucky */ return (LONGLONG)(ui.QuadPart - 116444736000000000) * 1e-7; } #endif cool.io-1.8.1/ext/libev/ev_kqueue.c0000644000004100000410000001523214632135713017161 0ustar www-datawww-data/* * libev kqueue backend * * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. 
*/ #include <sys/types.h> #include <sys/time.h> #include <sys/event.h> #include <string.h> #include <errno.h> inline_speed void kqueue_change (EV_P_ int fd, int filter, int flags, int fflags) { ++kqueue_changecnt; array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2); EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); } /* OS X at least needs this */ #ifndef EV_ENABLE # define EV_ENABLE 0 #endif #ifndef NOTE_EOF # define NOTE_EOF 0 #endif static void kqueue_modify (EV_P_ int fd, int oev, int nev) { if (oev != nev) { if (oev & EV_READ) kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0); if (oev & EV_WRITE) kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0); } /* to detect close/reopen reliably, we have to re-add */ /* event requests even when oev == nev */ if (nev & EV_READ) kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD | EV_ENABLE, NOTE_EOF); if (nev & EV_WRITE) kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, NOTE_EOF); } static void kqueue_poll (EV_P_ ev_tstamp timeout) { int res, i; struct timespec ts; /* need to resize so there is enough space for errors */ if (kqueue_changecnt > kqueue_eventmax) { ev_free (kqueue_events); kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); } EV_RELEASE_CB; EV_TS_SET (ts, timeout); res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); EV_ACQUIRE_CB; kqueue_changecnt = 0; if (expect_false (res < 0)) { if (errno != EINTR) ev_syserr ("(libev) kevent"); return; } for (i = 0; i < res; ++i) { int fd = kqueue_events [i].ident; if (expect_false (kqueue_events [i].flags & EV_ERROR)) { int err = kqueue_events [i].data; /* we are only interested in errors for fds that we are interested in :) */ if (anfds [fd].events) { if (err == ENOENT) /* resubmit changes on ENOENT */ kqueue_modify (EV_A_ fd, 0, anfds [fd].events); else if (err == EBADF) /* on EBADF, we re-check the fd */ { if (fd_valid (fd)) kqueue_modify (EV_A_ fd, 0, anfds [fd].events); else fd_kill (EV_A_ fd); } else /* on all other errors, we error out on the fd */ fd_kill (EV_A_ fd); } } else fd_event ( EV_A_ fd, kqueue_events [i].filter == EVFILT_READ ? EV_READ : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE : 0 ); } if (expect_false (res == kqueue_eventmax)) { ev_free (kqueue_events); kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1); kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); } } inline_size int kqueue_init (EV_P_ int flags) { /* initialize the kernel queue */ kqueue_fd_pid = getpid (); if ((backend_fd = kqueue ()) < 0) return 0; fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ backend_mintime = 1e-9; /* apparently, they did the right thing in freebsd */ backend_modify = kqueue_modify; backend_poll = kqueue_poll; kqueue_eventmax = 64; /* initial number of events receivable per poll */ kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); kqueue_changes = 0; kqueue_changemax = 0; kqueue_changecnt = 0; return EVBACKEND_KQUEUE; } inline_size void kqueue_destroy (EV_P) { ev_free (kqueue_events); ev_free (kqueue_changes); } inline_size void kqueue_fork (EV_P) { /* some BSD kernels don't just destroy the kqueue itself, * but also close the fd, which isn't documented, and * impossible to support properly. 
* we remember the pid of the kqueue call and only close * the fd if the pid is still the same. * this leaks fds on sane kernels, but BSD interfaces are * notoriously buggy and rarely get fixed. */ pid_t newpid = getpid (); if (newpid == kqueue_fd_pid) close (backend_fd); kqueue_fd_pid = newpid; while ((backend_fd = kqueue ()) < 0) ev_syserr ("(libev) kqueue"); fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* re-register interest in fds */ fd_rearm_all (EV_A); } /* sys/event.h defines EV_ERROR */ #undef EV_ERROR cool.io-1.8.1/ext/libev/ev_poll.c0000644000004100000410000001053314632135713016627 0ustar www-datawww-data/* * libev poll fd activity backend * * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #include <poll.h> inline_size void pollidx_init (int *base, int count) { /* consider using memset (.., -1, ...), which is practically guaranteed * to work on all systems implementing poll */ while (count--) *base++ = -1; } static void poll_modify (EV_P_ int fd, int oev, int nev) { int idx; if (oev == nev) return; array_needsize (int, pollidxs, pollidxmax, fd + 1, pollidx_init); idx = pollidxs [fd]; if (idx < 0) /* need to allocate a new pollfd */ { pollidxs [fd] = idx = pollcnt++; array_needsize (struct pollfd, polls, pollmax, pollcnt, EMPTY2); polls [idx].fd = fd; } assert (polls [idx].fd == fd); if (nev) polls [idx].events = (nev & EV_READ ? POLLIN : 0) | (nev & EV_WRITE ? 
POLLOUT : 0); else /* remove pollfd */ { pollidxs [fd] = -1; if (expect_true (idx < --pollcnt)) { polls [idx] = polls [pollcnt]; pollidxs [polls [idx].fd] = idx; } } } static void poll_poll (EV_P_ ev_tstamp timeout) { struct pollfd *p; int res; EV_RELEASE_CB; res = poll (polls, pollcnt, timeout * 1e3); EV_ACQUIRE_CB; if (expect_false (res < 0)) { if (errno == EBADF) fd_ebadf (EV_A); else if (errno == ENOMEM && !syserr_cb) fd_enomem (EV_A); else if (errno != EINTR) ev_syserr ("(libev) poll"); } else for (p = polls; res; ++p) { assert (("libev: poll() returned illegal result, broken BSD kernel?", p < polls + pollcnt)); if (expect_false (p->revents)) /* this expect is debatable */ { --res; if (expect_false (p->revents & POLLNVAL)) fd_kill (EV_A_ p->fd); else fd_event ( EV_A_ p->fd, (p->revents & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) | (p->revents & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) ); } } } inline_size int poll_init (EV_P_ int flags) { backend_mintime = 1e-3; backend_modify = poll_modify; backend_poll = poll_poll; pollidxs = 0; pollidxmax = 0; polls = 0; pollmax = 0; pollcnt = 0; return EVBACKEND_POLL; } inline_size void poll_destroy (EV_P) { ev_free (pollidxs); ev_free (polls); } cool.io-1.8.1/ext/libev/ev_vars.h0000644000004100000410000001412514632135713016642 0ustar www-datawww-data/* * loop member variable declarations * * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. 
*/ #define VARx(type,name) VAR(name, type name) VARx(ev_tstamp, now_floor) /* last time we refreshed rt_time */ VARx(ev_tstamp, mn_now) /* monotonic clock "now" */ VARx(ev_tstamp, rtmn_diff) /* difference realtime - monotonic time */ /* for reverse feeding of events */ VARx(W *, rfeeds) VARx(int, rfeedmax) VARx(int, rfeedcnt) VAR (pendings, ANPENDING *pendings [NUMPRI]) VAR (pendingmax, int pendingmax [NUMPRI]) VAR (pendingcnt, int pendingcnt [NUMPRI]) VARx(int, pendingpri) /* highest priority currently pending */ VARx(ev_prepare, pending_w) /* dummy pending watcher */ VARx(ev_tstamp, io_blocktime) VARx(ev_tstamp, timeout_blocktime) VARx(int, backend) VARx(int, activecnt) /* total number of active events ("refcount") */ VARx(EV_ATOMIC_T, loop_done) /* signal by ev_break */ VARx(int, backend_fd) VARx(ev_tstamp, backend_mintime) /* assumed typical timer resolution */ VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev)) VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout)) VARx(ANFD *, anfds) VARx(int, anfdmax) VAR (evpipe, int evpipe [2]) VARx(ev_io, pipe_w) VARx(EV_ATOMIC_T, pipe_write_wanted) VARx(EV_ATOMIC_T, pipe_write_skipped) #if !defined(_WIN32) || EV_GENWRAP VARx(pid_t, curpid) #endif VARx(char, postfork) /* true if we need to recreate kernel state after fork */ #if EV_USE_SELECT || EV_GENWRAP VARx(void *, vec_ri) VARx(void *, vec_ro) VARx(void *, vec_wi) VARx(void *, vec_wo) #if defined(_WIN32) || EV_GENWRAP VARx(void *, vec_eo) #endif VARx(int, vec_max) #endif #if EV_USE_POLL || EV_GENWRAP VARx(struct pollfd *, polls) VARx(int, pollmax) VARx(int, pollcnt) VARx(int *, pollidxs) /* maps fds into structure indices */ VARx(int, pollidxmax) #endif #if EV_USE_EPOLL || EV_GENWRAP VARx(struct epoll_event *, epoll_events) VARx(int, epoll_eventmax) VARx(int *, epoll_eperms) VARx(int, epoll_epermcnt) VARx(int, epoll_epermmax) #endif #if EV_USE_KQUEUE || EV_GENWRAP VARx(pid_t, kqueue_fd_pid) VARx(struct kevent *, kqueue_changes) VARx(int, kqueue_changemax) VARx(int, kqueue_changecnt) VARx(struct kevent *, kqueue_events) VARx(int, kqueue_eventmax) #endif #if EV_USE_PORT || EV_GENWRAP VARx(struct port_event *, port_events) VARx(int, port_eventmax) #endif #if EV_USE_IOCP || EV_GENWRAP VARx(HANDLE, iocp) #endif VARx(int *, fdchanges) VARx(int, fdchangemax) VARx(int, fdchangecnt) VARx(ANHE *, timers) VARx(int, timermax) VARx(int, timercnt) #if EV_PERIODIC_ENABLE || EV_GENWRAP VARx(ANHE *, periodics) VARx(int, periodicmax) VARx(int, periodiccnt) #endif #if EV_IDLE_ENABLE || EV_GENWRAP VAR (idles, ev_idle **idles [NUMPRI]) VAR (idlemax, int idlemax [NUMPRI]) VAR (idlecnt, int idlecnt [NUMPRI]) #endif VARx(int, idleall) /* total number */ VARx(struct ev_prepare **, prepares) VARx(int, preparemax) VARx(int, preparecnt) VARx(struct ev_check **, checks) VARx(int, checkmax) VARx(int, checkcnt) #if EV_FORK_ENABLE || EV_GENWRAP VARx(struct ev_fork **, forks) VARx(int, forkmax) VARx(int, forkcnt) #endif #if EV_CLEANUP_ENABLE || EV_GENWRAP VARx(struct ev_cleanup **, cleanups) VARx(int, cleanupmax) VARx(int, cleanupcnt) #endif #if EV_ASYNC_ENABLE || EV_GENWRAP VARx(EV_ATOMIC_T, async_pending) VARx(struct ev_async **, asyncs) VARx(int, asyncmax) VARx(int, asynccnt) #endif #if EV_USE_INOTIFY || EV_GENWRAP VARx(int, fs_fd) VARx(ev_io, fs_w) VARx(char, fs_2625) /* whether we are running in linux 2.6.25 or newer */ VAR (fs_hash, ANFS fs_hash [EV_INOTIFY_HASHSIZE]) #endif VARx(EV_ATOMIC_T, sig_pending) #if EV_USE_SIGNALFD || EV_GENWRAP VARx(int, sigfd) VARx(ev_io, sigfd_w) 
VARx(sigset_t, sigfd_set) #endif VARx(unsigned int, origflags) /* original loop flags */ #if EV_FEATURE_API || EV_GENWRAP VARx(unsigned int, loop_count) /* total number of loop iterations/blocks */ VARx(unsigned int, loop_depth) /* #ev_run enters - #ev_run leaves */ VARx(void *, userdata) /* C++ doesn't support the ev_loop_callback typedef here. stinks. */ VAR (release_cb, void (*release_cb)(EV_P) EV_THROW) VAR (acquire_cb, void (*acquire_cb)(EV_P) EV_THROW) VAR (invoke_cb , ev_loop_callback invoke_cb) #endif #undef VARx cool.io-1.8.1/ext/libev/ev.h0000644000004100000410000007146014632135713015614 0ustar www-datawww-data/* * libev native API header * * Copyright (c) 2007,2008,2009,2010,2011,2012,2015 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. 
*/ #ifndef EV_H_ #define EV_H_ #ifdef __cplusplus # define EV_CPP(x) x # if __cplusplus >= 201103L # define EV_THROW noexcept # else # define EV_THROW throw () # endif #else # define EV_CPP(x) # define EV_THROW #endif EV_CPP(extern "C" {) /*****************************************************************************/ /* pre-4.0 compatibility */ #ifndef EV_COMPAT3 # define EV_COMPAT3 1 #endif #ifndef EV_FEATURES # if defined __OPTIMIZE_SIZE__ # define EV_FEATURES 0x7c # else # define EV_FEATURES 0x7f # endif #endif #define EV_FEATURE_CODE ((EV_FEATURES) & 1) #define EV_FEATURE_DATA ((EV_FEATURES) & 2) #define EV_FEATURE_CONFIG ((EV_FEATURES) & 4) #define EV_FEATURE_API ((EV_FEATURES) & 8) #define EV_FEATURE_WATCHERS ((EV_FEATURES) & 16) #define EV_FEATURE_BACKENDS ((EV_FEATURES) & 32) #define EV_FEATURE_OS ((EV_FEATURES) & 64) /* these priorities are inclusive, higher priorities will be invoked earlier */ #ifndef EV_MINPRI # define EV_MINPRI (EV_FEATURE_CONFIG ? -2 : 0) #endif #ifndef EV_MAXPRI # define EV_MAXPRI (EV_FEATURE_CONFIG ? +2 : 0) #endif #ifndef EV_MULTIPLICITY # define EV_MULTIPLICITY EV_FEATURE_CONFIG #endif #ifndef EV_PERIODIC_ENABLE # define EV_PERIODIC_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_STAT_ENABLE # define EV_STAT_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_PREPARE_ENABLE # define EV_PREPARE_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_CHECK_ENABLE # define EV_CHECK_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_IDLE_ENABLE # define EV_IDLE_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_FORK_ENABLE # define EV_FORK_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_CLEANUP_ENABLE # define EV_CLEANUP_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_SIGNAL_ENABLE # define EV_SIGNAL_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_CHILD_ENABLE # ifdef _WIN32 # define EV_CHILD_ENABLE 0 # else # define EV_CHILD_ENABLE EV_FEATURE_WATCHERS #endif #endif #ifndef EV_ASYNC_ENABLE # define EV_ASYNC_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_EMBED_ENABLE # define EV_EMBED_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_WALK_ENABLE # define EV_WALK_ENABLE 0 /* not yet */ #endif /*****************************************************************************/ #if EV_CHILD_ENABLE && !EV_SIGNAL_ENABLE # undef EV_SIGNAL_ENABLE # define EV_SIGNAL_ENABLE 1 #endif /*****************************************************************************/ typedef double ev_tstamp; #include <string.h> /* for memmove */ #ifndef EV_ATOMIC_T # include <signal.h> # define EV_ATOMIC_T sig_atomic_t volatile #endif #if EV_STAT_ENABLE # ifdef _WIN32 # include <time.h> # include <sys/types.h> # endif # include <sys/stat.h> #endif /* support multiple event loops? 
*/ #if EV_MULTIPLICITY struct ev_loop; # define EV_P struct ev_loop *loop /* a loop as sole parameter in a declaration */ # define EV_P_ EV_P, /* a loop as first of multiple parameters */ # define EV_A loop /* a loop as sole argument to a function call */ # define EV_A_ EV_A, /* a loop as first of multiple arguments */ # define EV_DEFAULT_UC ev_default_loop_uc_ () /* the default loop, if initialised, as sole arg */ # define EV_DEFAULT_UC_ EV_DEFAULT_UC, /* the default loop as first of multiple arguments */ # define EV_DEFAULT ev_default_loop (0) /* the default loop as sole arg */ # define EV_DEFAULT_ EV_DEFAULT, /* the default loop as first of multiple arguments */ #else # define EV_P void # define EV_P_ # define EV_A # define EV_A_ # define EV_DEFAULT # define EV_DEFAULT_ # define EV_DEFAULT_UC # define EV_DEFAULT_UC_ # undef EV_EMBED_ENABLE #endif /* EV_INLINE is used for functions in header files */ #if __STDC_VERSION__ >= 199901L || __GNUC__ >= 3 # define EV_INLINE static inline #else # define EV_INLINE static #endif #ifdef EV_API_STATIC # define EV_API_DECL static #else # define EV_API_DECL extern #endif /* EV_PROTOTYPES can be used to switch of prototype declarations */ #ifndef EV_PROTOTYPES # define EV_PROTOTYPES 1 #endif /*****************************************************************************/ #define EV_VERSION_MAJOR 4 #define EV_VERSION_MINOR 24 /* eventmask, revents, events... */ enum { EV_UNDEF = (int)0xFFFFFFFF, /* guaranteed to be invalid */ EV_NONE = 0x00, /* no events */ EV_READ = 0x01, /* ev_io detected read will not block */ EV_WRITE = 0x02, /* ev_io detected write will not block */ EV__IOFDSET = 0x80, /* internal use only */ EV_IO = EV_READ, /* alias for type-detection */ EV_TIMER = 0x00000100, /* timer timed out */ #if EV_COMPAT3 EV_TIMEOUT = EV_TIMER, /* pre 4.0 API compatibility */ #endif EV_PERIODIC = 0x00000200, /* periodic timer timed out */ EV_SIGNAL = 0x00000400, /* signal was received */ EV_CHILD = 0x00000800, /* child/pid had status change */ EV_STAT = 0x00001000, /* stat data changed */ EV_IDLE = 0x00002000, /* event loop is idling */ EV_PREPARE = 0x00004000, /* event loop about to poll */ EV_CHECK = 0x00008000, /* event loop finished poll */ EV_EMBED = 0x00010000, /* embedded event loop needs sweep */ EV_FORK = 0x00020000, /* event loop resumed in child */ EV_CLEANUP = 0x00040000, /* event loop resumed in child */ EV_ASYNC = 0x00080000, /* async intra-loop signal */ EV_CUSTOM = 0x01000000, /* for use by user code */ EV_ERROR = (int)0x80000000 /* sent when an error occurs */ }; /* can be used to add custom fields to all watchers, while losing binary compatibility */ #ifndef EV_COMMON # define EV_COMMON void *data; #endif #ifndef EV_CB_DECLARE # define EV_CB_DECLARE(type) void (*cb)(EV_P_ struct type *w, int revents); #endif #ifndef EV_CB_INVOKE # define EV_CB_INVOKE(watcher,revents) (watcher)->cb (EV_A_ (watcher), (revents)) #endif /* not official, do not use */ #define EV_CB(type,name) void name (EV_P_ struct ev_ ## type *w, int revents) /* * struct member types: * private: you may look at them, but not change them, * and they might not mean anything to you. * ro: can be read anytime, but only changed when the watcher isn't active. * rw: can be read and modified anytime, even when the watcher is active. 
* * some internal details that might be helpful for debugging: * * active is either 0, which means the watcher is not active, * or the array index of the watcher (periodics, timers) * or the array index + 1 (most other watchers) * or simply 1 for watchers that aren't in some array. * pending is either 0, in which case the watcher isn't, * or the array index + 1 in the pendings array. */ #if EV_MINPRI == EV_MAXPRI # define EV_DECL_PRIORITY #elif !defined (EV_DECL_PRIORITY) # define EV_DECL_PRIORITY int priority; #endif /* shared by all watchers */ #define EV_WATCHER(type) \ int active; /* private */ \ int pending; /* private */ \ EV_DECL_PRIORITY /* private */ \ EV_COMMON /* rw */ \ EV_CB_DECLARE (type) /* private */ #define EV_WATCHER_LIST(type) \ EV_WATCHER (type) \ struct ev_watcher_list *next; /* private */ #define EV_WATCHER_TIME(type) \ EV_WATCHER (type) \ ev_tstamp at; /* private */ /* base class, nothing to see here unless you subclass */ typedef struct ev_watcher { EV_WATCHER (ev_watcher) } ev_watcher; /* base class, nothing to see here unless you subclass */ typedef struct ev_watcher_list { EV_WATCHER_LIST (ev_watcher_list) } ev_watcher_list; /* base class, nothing to see here unless you subclass */ typedef struct ev_watcher_time { EV_WATCHER_TIME (ev_watcher_time) } ev_watcher_time; /* invoked when fd is either EV_READable or EV_WRITEable */ /* revent EV_READ, EV_WRITE */ typedef struct ev_io { EV_WATCHER_LIST (ev_io) int fd; /* ro */ int events; /* ro */ } ev_io; /* invoked after a specific time, repeatable (based on monotonic clock) */ /* revent EV_TIMEOUT */ typedef struct ev_timer { EV_WATCHER_TIME (ev_timer) ev_tstamp repeat; /* rw */ } ev_timer; /* invoked at some specific time, possibly repeating at regular intervals (based on UTC) */ /* revent EV_PERIODIC */ typedef struct ev_periodic { EV_WATCHER_TIME (ev_periodic) ev_tstamp offset; /* rw */ ev_tstamp interval; /* rw */ ev_tstamp (*reschedule_cb)(struct ev_periodic *w, ev_tstamp now) EV_THROW; /* rw */ } ev_periodic; /* invoked when the given signal has been received */ /* revent EV_SIGNAL */ typedef struct ev_signal { EV_WATCHER_LIST (ev_signal) int signum; /* ro */ } ev_signal; /* invoked when sigchld is received and waitpid indicates the given pid */ /* revent EV_CHILD */ /* does not support priorities */ typedef struct ev_child { EV_WATCHER_LIST (ev_child) int flags; /* private */ int pid; /* ro */ int rpid; /* rw, holds the received pid */ int rstatus; /* rw, holds the exit status, use the macros from sys/wait.h */ } ev_child; #if EV_STAT_ENABLE /* st_nlink = 0 means missing file or other error */ # ifdef _WIN32 typedef struct _stati64 ev_statdata; # else typedef struct stat ev_statdata; # endif /* invoked each time the stat data changes for a given path */ /* revent EV_STAT */ typedef struct ev_stat { EV_WATCHER_LIST (ev_stat) ev_timer timer; /* private */ ev_tstamp interval; /* ro */ const char *path; /* ro */ ev_statdata prev; /* ro */ ev_statdata attr; /* ro */ int wd; /* wd for inotify, fd for kqueue */ } ev_stat; #endif #if EV_IDLE_ENABLE /* invoked when the nothing else needs to be done, keeps the process from blocking */ /* revent EV_IDLE */ typedef struct ev_idle { EV_WATCHER (ev_idle) } ev_idle; #endif /* invoked for each run of the mainloop, just before the blocking call */ /* you can still change events in any way you like */ /* revent EV_PREPARE */ typedef struct ev_prepare { EV_WATCHER (ev_prepare) } ev_prepare; /* invoked for each run of the mainloop, just after the blocking call */ /* revent EV_CHECK 
*/ typedef struct ev_check { EV_WATCHER (ev_check) } ev_check; #if EV_FORK_ENABLE /* the callback gets invoked before check in the child process when a fork was detected */ /* revent EV_FORK */ typedef struct ev_fork { EV_WATCHER (ev_fork) } ev_fork; #endif #if EV_CLEANUP_ENABLE /* is invoked just before the loop gets destroyed */ /* revent EV_CLEANUP */ typedef struct ev_cleanup { EV_WATCHER (ev_cleanup) } ev_cleanup; #endif #if EV_EMBED_ENABLE /* used to embed an event loop inside another */ /* the callback gets invoked when the event loop has handled events, and can be 0 */ typedef struct ev_embed { EV_WATCHER (ev_embed) struct ev_loop *other; /* ro */ ev_io io; /* private */ ev_prepare prepare; /* private */ ev_check check; /* unused */ ev_timer timer; /* unused */ ev_periodic periodic; /* unused */ ev_idle idle; /* unused */ ev_fork fork; /* private */ #if EV_CLEANUP_ENABLE ev_cleanup cleanup; /* unused */ #endif } ev_embed; #endif #if EV_ASYNC_ENABLE /* invoked when somebody calls ev_async_send on the watcher */ /* revent EV_ASYNC */ typedef struct ev_async { EV_WATCHER (ev_async) EV_ATOMIC_T sent; /* private */ } ev_async; # define ev_async_pending(w) (+(w)->sent) #endif /* the presence of this union forces similar struct layout */ union ev_any_watcher { struct ev_watcher w; struct ev_watcher_list wl; struct ev_io io; struct ev_timer timer; struct ev_periodic periodic; struct ev_signal signal; struct ev_child child; #if EV_STAT_ENABLE struct ev_stat stat; #endif #if EV_IDLE_ENABLE struct ev_idle idle; #endif struct ev_prepare prepare; struct ev_check check; #if EV_FORK_ENABLE struct ev_fork fork; #endif #if EV_CLEANUP_ENABLE struct ev_cleanup cleanup; #endif #if EV_EMBED_ENABLE struct ev_embed embed; #endif #if EV_ASYNC_ENABLE struct ev_async async; #endif }; /* flag bits for ev_default_loop and ev_loop_new */ enum { /* the default */ EVFLAG_AUTO = 0x00000000U, /* not quite a mask */ /* flag bits */ EVFLAG_NOENV = 0x01000000U, /* do NOT consult environment */ EVFLAG_FORKCHECK = 0x02000000U, /* check for a fork in each iteration */ /* debugging/feature disable */ EVFLAG_NOINOTIFY = 0x00100000U, /* do not attempt to use inotify */ #if EV_COMPAT3 EVFLAG_NOSIGFD = 0, /* compatibility to pre-3.9 */ #endif EVFLAG_SIGNALFD = 0x00200000U, /* attempt to use signalfd */ EVFLAG_NOSIGMASK = 0x00400000U /* avoid modifying the signal mask */ }; /* method bits to be ored together */ enum { EVBACKEND_SELECT = 0x00000001U, /* available just about anywhere */ EVBACKEND_POLL = 0x00000002U, /* !win, !aix, broken on osx */ EVBACKEND_EPOLL = 0x00000004U, /* linux */ EVBACKEND_KQUEUE = 0x00000008U, /* bsd, broken on osx */ EVBACKEND_DEVPOLL = 0x00000010U, /* solaris 8 */ /* NYI */ EVBACKEND_PORT = 0x00000020U, /* solaris 10 */ EVBACKEND_ALL = 0x0000003FU, /* all known backends */ EVBACKEND_MASK = 0x0000FFFFU /* all future backends */ }; #if EV_PROTOTYPES EV_API_DECL int ev_version_major (void) EV_THROW; EV_API_DECL int ev_version_minor (void) EV_THROW; EV_API_DECL unsigned int ev_supported_backends (void) EV_THROW; EV_API_DECL unsigned int ev_recommended_backends (void) EV_THROW; EV_API_DECL unsigned int ev_embeddable_backends (void) EV_THROW; EV_API_DECL ev_tstamp ev_time (void) EV_THROW; EV_API_DECL void ev_sleep (ev_tstamp delay) EV_THROW; /* sleep for a while */ /* Sets the allocation function to use, works like realloc. * It is used to allocate and free memory. * If it returns zero when memory needs to be allocated, the library might abort * or take some potentially destructive action. 
* The default is your system realloc function. */ EV_API_DECL void ev_set_allocator (void *(*cb)(void *ptr, long size) EV_THROW) EV_THROW; /* set the callback function to call on a * retryable syscall error * (such as failed select, poll, epoll_wait) */ EV_API_DECL void ev_set_syserr_cb (void (*cb)(const char *msg) EV_THROW) EV_THROW; #if EV_MULTIPLICITY /* the default loop is the only one that handles signals and child watchers */ /* you can call this as often as you like */ EV_API_DECL struct ev_loop *ev_default_loop (unsigned int flags EV_CPP (= 0)) EV_THROW; #ifdef EV_API_STATIC EV_API_DECL struct ev_loop *ev_default_loop_ptr; #endif EV_INLINE struct ev_loop * ev_default_loop_uc_ (void) EV_THROW { extern struct ev_loop *ev_default_loop_ptr; return ev_default_loop_ptr; } EV_INLINE int ev_is_default_loop (EV_P) EV_THROW { return EV_A == EV_DEFAULT_UC; } /* create and destroy alternative loops that don't handle signals */ EV_API_DECL struct ev_loop *ev_loop_new (unsigned int flags EV_CPP (= 0)) EV_THROW; EV_API_DECL ev_tstamp ev_now (EV_P) EV_THROW; /* time w.r.t. timers and the eventloop, updated after each poll */ #else EV_API_DECL int ev_default_loop (unsigned int flags EV_CPP (= 0)) EV_THROW; /* returns true when successful */ EV_API_DECL ev_tstamp ev_rt_now; EV_INLINE ev_tstamp ev_now (void) EV_THROW { return ev_rt_now; } /* looks weird, but ev_is_default_loop (EV_A) still works if this exists */ EV_INLINE int ev_is_default_loop (void) EV_THROW { return 1; } #endif /* multiplicity */ /* destroy event loops, also works for the default loop */ EV_API_DECL void ev_loop_destroy (EV_P); /* this needs to be called after fork, to duplicate the loop */ /* when you want to re-use it in the child */ /* you can call it in either the parent or the child */ /* you can actually call it at any time, anywhere :) */ EV_API_DECL void ev_loop_fork (EV_P) EV_THROW; EV_API_DECL unsigned int ev_backend (EV_P) EV_THROW; /* backend in use by loop */ EV_API_DECL void ev_now_update (EV_P) EV_THROW; /* update event loop time */ #if EV_WALK_ENABLE /* walk (almost) all watchers in the loop of a given type, invoking the */ /* callback on every such watcher. The callback might stop the watcher, */ /* but do nothing else with the loop */ EV_API_DECL void ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_THROW; #endif #endif /* prototypes */ /* ev_run flags values */ enum { EVRUN_NOWAIT = 1, /* do not block/wait */ EVRUN_ONCE = 2 /* block *once* only */ }; /* ev_break how values */ enum { EVBREAK_CANCEL = 0, /* undo unloop */ EVBREAK_ONE = 1, /* unloop once */ EVBREAK_ALL = 2 /* unloop all loops */ }; #if EV_PROTOTYPES EV_API_DECL int ev_run (EV_P_ int flags EV_CPP (= 0)); EV_API_DECL void ev_break (EV_P_ int how EV_CPP (= EVBREAK_ONE)) EV_THROW; /* break out of the loop */ /* * ref/unref can be used to add or remove a refcount on the mainloop. every watcher * keeps one reference. if you have a long-running watcher you never unregister that * should not keep ev_loop from running, unref() after starting, and ref() before stopping. 
*/ EV_API_DECL void ev_ref (EV_P) EV_THROW; EV_API_DECL void ev_unref (EV_P) EV_THROW; /* * convenience function, wait for a single event, without registering an event watcher * if timeout is < 0, do wait indefinitely */ EV_API_DECL void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_THROW; # if EV_FEATURE_API EV_API_DECL unsigned int ev_iteration (EV_P) EV_THROW; /* number of loop iterations */ EV_API_DECL unsigned int ev_depth (EV_P) EV_THROW; /* #ev_loop enters - #ev_loop leaves */ EV_API_DECL void ev_verify (EV_P) EV_THROW; /* abort if loop data corrupted */ EV_API_DECL void ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_THROW; /* sleep at least this time, default 0 */ EV_API_DECL void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_THROW; /* sleep at least this time, default 0 */ /* advanced stuff for threading etc. support, see docs */ EV_API_DECL void ev_set_userdata (EV_P_ void *data) EV_THROW; EV_API_DECL void *ev_userdata (EV_P) EV_THROW; typedef void (*ev_loop_callback)(EV_P); EV_API_DECL void ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_THROW; /* C++ doesn't allow the use of the ev_loop_callback typedef here, so we need to spell it out */ EV_API_DECL void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_THROW, void (*acquire)(EV_P) EV_THROW) EV_THROW; EV_API_DECL unsigned int ev_pending_count (EV_P) EV_THROW; /* number of pending events, if any */ EV_API_DECL void ev_invoke_pending (EV_P); /* invoke all pending watchers */ /* * stop/start the timer handling. */ EV_API_DECL void ev_suspend (EV_P) EV_THROW; EV_API_DECL void ev_resume (EV_P) EV_THROW; #endif #endif /* these may evaluate ev multiple times, and the other arguments at most once */ /* either use ev_init + ev_TYPE_set, or the ev_TYPE_init macro, below, to first initialise a watcher */ #define ev_init(ev,cb_) do { \ ((ev_watcher *)(void *)(ev))->active = \ ((ev_watcher *)(void *)(ev))->pending = 0; \ ev_set_priority ((ev), 0); \ ev_set_cb ((ev), cb_); \ } while (0) #define ev_io_set(ev,fd_,events_) do { (ev)->fd = (fd_); (ev)->events = (events_) | EV__IOFDSET; } while (0) #define ev_timer_set(ev,after_,repeat_) do { ((ev_watcher_time *)(ev))->at = (after_); (ev)->repeat = (repeat_); } while (0) #define ev_periodic_set(ev,ofs_,ival_,rcb_) do { (ev)->offset = (ofs_); (ev)->interval = (ival_); (ev)->reschedule_cb = (rcb_); } while (0) #define ev_signal_set(ev,signum_) do { (ev)->signum = (signum_); } while (0) #define ev_child_set(ev,pid_,trace_) do { (ev)->pid = (pid_); (ev)->flags = !!(trace_); } while (0) #define ev_stat_set(ev,path_,interval_) do { (ev)->path = (path_); (ev)->interval = (interval_); (ev)->wd = -2; } while (0) #define ev_idle_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_prepare_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_check_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_embed_set(ev,other_) do { (ev)->other = (other_); } while (0) #define ev_fork_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_cleanup_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_async_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_io_init(ev,cb,fd,events) do { ev_init ((ev), (cb)); ev_io_set ((ev),(fd),(events)); } while (0) #define ev_timer_init(ev,cb,after,repeat) do { ev_init ((ev), (cb)); ev_timer_set ((ev),(after),(repeat)); } while (0) #define ev_periodic_init(ev,cb,ofs,ival,rcb) do { ev_init ((ev), (cb)); ev_periodic_set 
((ev),(ofs),(ival),(rcb)); } while (0) #define ev_signal_init(ev,cb,signum) do { ev_init ((ev), (cb)); ev_signal_set ((ev), (signum)); } while (0) #define ev_child_init(ev,cb,pid,trace) do { ev_init ((ev), (cb)); ev_child_set ((ev),(pid),(trace)); } while (0) #define ev_stat_init(ev,cb,path,interval) do { ev_init ((ev), (cb)); ev_stat_set ((ev),(path),(interval)); } while (0) #define ev_idle_init(ev,cb) do { ev_init ((ev), (cb)); ev_idle_set ((ev)); } while (0) #define ev_prepare_init(ev,cb) do { ev_init ((ev), (cb)); ev_prepare_set ((ev)); } while (0) #define ev_check_init(ev,cb) do { ev_init ((ev), (cb)); ev_check_set ((ev)); } while (0) #define ev_embed_init(ev,cb,other) do { ev_init ((ev), (cb)); ev_embed_set ((ev),(other)); } while (0) #define ev_fork_init(ev,cb) do { ev_init ((ev), (cb)); ev_fork_set ((ev)); } while (0) #define ev_cleanup_init(ev,cb) do { ev_init ((ev), (cb)); ev_cleanup_set ((ev)); } while (0) #define ev_async_init(ev,cb) do { ev_init ((ev), (cb)); ev_async_set ((ev)); } while (0) #define ev_is_pending(ev) (0 + ((ev_watcher *)(void *)(ev))->pending) /* ro, true when watcher is waiting for callback invocation */ #define ev_is_active(ev) (0 + ((ev_watcher *)(void *)(ev))->active) /* ro, true when the watcher has been started */ #define ev_cb_(ev) (ev)->cb /* rw */ #define ev_cb(ev) (memmove (&ev_cb_ (ev), &((ev_watcher *)(ev))->cb, sizeof (ev_cb_ (ev))), (ev)->cb) #if EV_MINPRI == EV_MAXPRI # define ev_priority(ev) ((ev), EV_MINPRI) # define ev_set_priority(ev,pri) ((ev), (pri)) #else # define ev_priority(ev) (+(((ev_watcher *)(void *)(ev))->priority)) # define ev_set_priority(ev,pri) ( (ev_watcher *)(void *)(ev))->priority = (pri) #endif #define ev_periodic_at(ev) (+((ev_watcher_time *)(ev))->at) #ifndef ev_set_cb # define ev_set_cb(ev,cb_) (ev_cb_ (ev) = (cb_), memmove (&((ev_watcher *)(ev))->cb, &ev_cb_ (ev), sizeof (ev_cb_ (ev)))) #endif /* stopping (enabling, adding) a watcher does nothing if it is already running */ /* stopping (disabling, deleting) a watcher does nothing unless it's already running */ #if EV_PROTOTYPES /* feeds an event into a watcher as if the event actually occurred */ /* accepts any ev_watcher type */ EV_API_DECL void ev_feed_event (EV_P_ void *w, int revents) EV_THROW; EV_API_DECL void ev_feed_fd_event (EV_P_ int fd, int revents) EV_THROW; #if EV_SIGNAL_ENABLE EV_API_DECL void ev_feed_signal (int signum) EV_THROW; EV_API_DECL void ev_feed_signal_event (EV_P_ int signum) EV_THROW; #endif EV_API_DECL void ev_invoke (EV_P_ void *w, int revents); EV_API_DECL int ev_clear_pending (EV_P_ void *w) EV_THROW; EV_API_DECL void ev_io_start (EV_P_ ev_io *w) EV_THROW; EV_API_DECL void ev_io_stop (EV_P_ ev_io *w) EV_THROW; EV_API_DECL void ev_timer_start (EV_P_ ev_timer *w) EV_THROW; EV_API_DECL void ev_timer_stop (EV_P_ ev_timer *w) EV_THROW; /* stops if active and no repeat, restarts if active and repeating, starts if inactive and repeating */ EV_API_DECL void ev_timer_again (EV_P_ ev_timer *w) EV_THROW; /* return remaining time */ EV_API_DECL ev_tstamp ev_timer_remaining (EV_P_ ev_timer *w) EV_THROW; #if EV_PERIODIC_ENABLE EV_API_DECL void ev_periodic_start (EV_P_ ev_periodic *w) EV_THROW; EV_API_DECL void ev_periodic_stop (EV_P_ ev_periodic *w) EV_THROW; EV_API_DECL void ev_periodic_again (EV_P_ ev_periodic *w) EV_THROW; #endif /* only supported in the default loop */ #if EV_SIGNAL_ENABLE EV_API_DECL void ev_signal_start (EV_P_ ev_signal *w) EV_THROW; EV_API_DECL void ev_signal_stop (EV_P_ ev_signal *w) EV_THROW; #endif /* only supported in the 
default loop */ # if EV_CHILD_ENABLE EV_API_DECL void ev_child_start (EV_P_ ev_child *w) EV_THROW; EV_API_DECL void ev_child_stop (EV_P_ ev_child *w) EV_THROW; # endif # if EV_STAT_ENABLE EV_API_DECL void ev_stat_start (EV_P_ ev_stat *w) EV_THROW; EV_API_DECL void ev_stat_stop (EV_P_ ev_stat *w) EV_THROW; EV_API_DECL void ev_stat_stat (EV_P_ ev_stat *w) EV_THROW; # endif # if EV_IDLE_ENABLE EV_API_DECL void ev_idle_start (EV_P_ ev_idle *w) EV_THROW; EV_API_DECL void ev_idle_stop (EV_P_ ev_idle *w) EV_THROW; # endif #if EV_PREPARE_ENABLE EV_API_DECL void ev_prepare_start (EV_P_ ev_prepare *w) EV_THROW; EV_API_DECL void ev_prepare_stop (EV_P_ ev_prepare *w) EV_THROW; #endif #if EV_CHECK_ENABLE EV_API_DECL void ev_check_start (EV_P_ ev_check *w) EV_THROW; EV_API_DECL void ev_check_stop (EV_P_ ev_check *w) EV_THROW; #endif # if EV_FORK_ENABLE EV_API_DECL void ev_fork_start (EV_P_ ev_fork *w) EV_THROW; EV_API_DECL void ev_fork_stop (EV_P_ ev_fork *w) EV_THROW; # endif # if EV_CLEANUP_ENABLE EV_API_DECL void ev_cleanup_start (EV_P_ ev_cleanup *w) EV_THROW; EV_API_DECL void ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_THROW; # endif # if EV_EMBED_ENABLE /* only supported when loop to be embedded is in fact embeddable */ EV_API_DECL void ev_embed_start (EV_P_ ev_embed *w) EV_THROW; EV_API_DECL void ev_embed_stop (EV_P_ ev_embed *w) EV_THROW; EV_API_DECL void ev_embed_sweep (EV_P_ ev_embed *w) EV_THROW; # endif # if EV_ASYNC_ENABLE EV_API_DECL void ev_async_start (EV_P_ ev_async *w) EV_THROW; EV_API_DECL void ev_async_stop (EV_P_ ev_async *w) EV_THROW; EV_API_DECL void ev_async_send (EV_P_ ev_async *w) EV_THROW; # endif #if EV_COMPAT3 #define EVLOOP_NONBLOCK EVRUN_NOWAIT #define EVLOOP_ONESHOT EVRUN_ONCE #define EVUNLOOP_CANCEL EVBREAK_CANCEL #define EVUNLOOP_ONE EVBREAK_ONE #define EVUNLOOP_ALL EVBREAK_ALL #if EV_PROTOTYPES EV_INLINE void ev_loop (EV_P_ int flags) { ev_run (EV_A_ flags); } EV_INLINE void ev_unloop (EV_P_ int how ) { ev_break (EV_A_ how ); } EV_INLINE void ev_default_destroy (void) { ev_loop_destroy (EV_DEFAULT); } EV_INLINE void ev_default_fork (void) { ev_loop_fork (EV_DEFAULT); } #if EV_FEATURE_API EV_INLINE unsigned int ev_loop_count (EV_P) { return ev_iteration (EV_A); } EV_INLINE unsigned int ev_loop_depth (EV_P) { return ev_depth (EV_A); } EV_INLINE void ev_loop_verify (EV_P) { ev_verify (EV_A); } #endif #endif #else typedef struct ev_loop ev_loop; #endif #endif EV_CPP(}) #endif cool.io-1.8.1/ext/libev/LICENSE0000644000004100000410000000400714632135713016027 0ustar www-datawww-dataAll files in libev are Copyright (c)2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Alternatively, the contents of this package may be used under the terms of the GNU General Public License ("GPL") version 2 or any later version, in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this package only under the terms of the GPL and not to allow others to use your version of this file under the BSD license, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL in this and the other files of this package. If you do not delete the provisions above, a recipient may use your version of this file under either the BSD or the GPL.

cool.io-1.8.1/ext/libev/README0000644000004100000410000000477414632135713015705 0ustar www-datawww-datalibev is a high-performance event loop/event model with lots of features. (see benchmark at http://libev.schmorp.de/bench.html)

ABOUT

Homepage: http://software.schmorp.de/pkg/libev
Mailinglist: libev@lists.schmorp.de
             http://lists.schmorp.de/cgi-bin/mailman/listinfo/libev
Library Documentation: http://pod.tst.eu/http://cvs.schmorp.de/libev/ev.pod

Libev is modelled (very loosely) after libevent and the Event perl module, but is faster, scales better and is more correct, and also more featureful. And also smaller. Yay.

Some of the specialties of libev not commonly found elsewhere are:

- extensive and detailed, readable documentation (not doxygen garbage).
- fully supports fork, can detect fork in various ways and automatically re-arms kernel mechanisms that do not support fork.
- highly optimised select, poll, epoll, kqueue and event ports backends.
- filesystem object (path) watching (with optional linux inotify support).
- wallclock-based times (using absolute time, cron-like).
- relative timers/timeouts (handle time jumps).
- fast intra-thread communication between multiple event loops (with optional fast linux eventfd backend).
- extremely easy to embed (fully documented, no dependencies, autoconf supported but optional).
- very small codebase, no bloated library, simple code.
- fully extensible by being able to plug into the event loop, integrate other event loops, integrate other event loop users.
- very little memory use (small watchers, small event loop data).
- optional C++ interface allowing method and function callbacks at no extra memory or runtime overhead.
- optional Perl interface with similar characteristics (capable of running Glib/Gtk2 on libev).
- support for other languages (multiple C++ interfaces, D, Ruby, Python) available from third-parties.

Examples of programs that embed libev: the EV perl module, node.js, auditd, rxvt-unicode, gvpe (GNU Virtual Private Ethernet), the Deliantra MMORPG server (http://www.deliantra.net/), Rubinius (a next-generation Ruby VM), the Ebb web server, the Rev event toolkit.

CONTRIBUTORS

libev was written and designed by Marc Lehmann and Emanuele Giaquinta.
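A minimal embedding sketch of the API they designed (illustrative only -- the callback name and the 5.5 second timeout are arbitrary, but the calls are the watcher API declared in ev.h):

   #include <ev.h>
   #include <stdio.h>

   /* invoked by the loop when the timer expires */
   static void
   timeout_cb (EV_P_ ev_timer *w, int revents)
   {
     puts ("timeout");
     ev_break (EV_A_ EVBREAK_ONE); /* make ev_run return */
   }

   int
   main (void)
   {
     struct ev_loop *loop = EV_DEFAULT;
     ev_timer timeout_watcher;

     /* one-shot 5.5 second timer (repeat = 0.) */
     ev_timer_init (&timeout_watcher, timeout_cb, 5.5, 0.);
     ev_timer_start (loop, &timeout_watcher);

     /* run until ev_break is called or no watcher remains active */
     ev_run (loop, 0);
     return 0;
   }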
The following people sent in patches or made other noteworthy contributions to the design (for minor patches, see the Changes file. If I forgot to include you, please shout at me, it was an accident): W.C.A. Wijngaards Christopher Layne Chris Brody cool.io-1.8.1/ext/libev/ev_port.c0000644000004100000410000001440414632135713016646 0ustar www-datawww-data/* * libev solaris event port backend * * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ /* useful reading: * * http://bugs.opensolaris.org/view_bug.do?bug_id=6268715 (random results) * http://bugs.opensolaris.org/view_bug.do?bug_id=6455223 (just totally broken) * http://bugs.opensolaris.org/view_bug.do?bug_id=6873782 (manpage ETIME) * http://bugs.opensolaris.org/view_bug.do?bug_id=6874410 (implementation ETIME) * http://www.mail-archive.com/networking-discuss@opensolaris.org/msg11898.html ETIME vs. nget * http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/lib/libc/port/gen/event_port.c (libc) * http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/fs/portfs/port.c#1325 (kernel) */ #include #include #include #include #include #include inline_speed void port_associate_and_check (EV_P_ int fd, int ev) { if (0 > port_associate ( backend_fd, PORT_SOURCE_FD, fd, (ev & EV_READ ? POLLIN : 0) | (ev & EV_WRITE ? POLLOUT : 0), 0 ) ) { if (errno == EBADFD) fd_kill (EV_A_ fd); else ev_syserr ("(libev) port_associate"); } } static void port_modify (EV_P_ int fd, int oev, int nev) { /* we need to reassociate no matter what, as closes are * once more silently being discarded. 
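* (event port associations are also one-shot: the kernel drops an
* association after delivering an event for it, which is why port_poll
* below re-arms every delivered fd via fd_change.)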
*/ if (!nev) { if (oev) port_dissociate (backend_fd, PORT_SOURCE_FD, fd); } else port_associate_and_check (EV_A_ fd, nev); } static void port_poll (EV_P_ ev_tstamp timeout) { int res, i; struct timespec ts; uint_t nget = 1; /* we initialise this to something we will skip in the loop, as */ /* port_getn can return with nget unchanged, but no indication */ /* whether it was the original value or has been updated :/ */ port_events [0].portev_source = 0; EV_RELEASE_CB; EV_TS_SET (ts, timeout); res = port_getn (backend_fd, port_events, port_eventmax, &nget, &ts); EV_ACQUIRE_CB; /* port_getn may or may not set nget on error */ /* so we rely on port_events [0].portev_source not being updated */ if (res == -1 && errno != ETIME && errno != EINTR) ev_syserr ("(libev) port_getn (see http://bugs.opensolaris.org/view_bug.do?bug_id=6268715, try LIBEV_FLAGS=3 env variable)"); for (i = 0; i < nget; ++i) { if (port_events [i].portev_source == PORT_SOURCE_FD) { int fd = port_events [i].portev_object; fd_event ( EV_A_ fd, (port_events [i].portev_events & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) | (port_events [i].portev_events & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) ); fd_change (EV_A_ fd, EV__IOFDSET); } } if (expect_false (nget == port_eventmax)) { ev_free (port_events); port_eventmax = array_nextsize (sizeof (port_event_t), port_eventmax, port_eventmax + 1); port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax); } } inline_size int port_init (EV_P_ int flags) { /* Initialize the kernel queue */ if ((backend_fd = port_create ()) < 0) return 0; assert (("libev: PORT_SOURCE_FD must not be zero", PORT_SOURCE_FD)); fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ /* if my reading of the opensolaris kernel sources are correct, then * opensolaris does something very stupid: it checks if the time has already * elapsed and doesn't round up if that is the case,m otherwise it DOES round * up. Since we can't know what the case is, we need to guess by using a * "large enough" timeout. Normally, 1e-9 would be correct. */ backend_mintime = 1e-3; /* needed to compensate for port_getn returning early */ backend_modify = port_modify; backend_poll = port_poll; port_eventmax = 64; /* initial number of events receivable per poll */ port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax); return EVBACKEND_PORT; } inline_size void port_destroy (EV_P) { ev_free (port_events); } inline_size void port_fork (EV_P) { close (backend_fd); while ((backend_fd = port_create ()) < 0) ev_syserr ("(libev) port"); fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* re-register interest in fds */ fd_rearm_all (EV_A); } cool.io-1.8.1/ext/libev/ev.c0000644000004100000410000040365014632135713015607 0ustar www-datawww-data/* * libev event processing core, watcher management * * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ /* ########## COOLIO PATCHERY HO! ########## */ #include "ruby.h" #if defined(HAVE_RUBY_THREAD_H) #include "ruby/thread.h" #endif /* ######################################## */ /* this big block deduces configuration from config.h */ #ifndef EV_STANDALONE # ifdef EV_CONFIG_H # include EV_CONFIG_H # else # include "config.h" # endif # if HAVE_FLOOR # ifndef EV_USE_FLOOR # define EV_USE_FLOOR 1 # endif # endif # if HAVE_CLOCK_SYSCALL # ifndef EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 1 # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 1 # endif # endif # elif !defined EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 0 # endif # if HAVE_CLOCK_GETTIME # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 1 # endif # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif # else # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 0 # endif # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif # endif # if HAVE_NANOSLEEP # ifndef EV_USE_NANOSLEEP # define EV_USE_NANOSLEEP EV_FEATURE_OS # endif # else # undef EV_USE_NANOSLEEP # define EV_USE_NANOSLEEP 0 # endif # if HAVE_SELECT && HAVE_SYS_SELECT_H # ifndef EV_USE_SELECT # define EV_USE_SELECT EV_FEATURE_BACKENDS # endif # else # undef EV_USE_SELECT # define EV_USE_SELECT 0 # endif # if HAVE_POLL && HAVE_POLL_H # ifndef EV_USE_POLL # define EV_USE_POLL EV_FEATURE_BACKENDS # endif # else # undef EV_USE_POLL # define EV_USE_POLL 0 # endif # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H # ifndef EV_USE_EPOLL # define EV_USE_EPOLL EV_FEATURE_BACKENDS # endif # else # undef EV_USE_EPOLL # define EV_USE_EPOLL 0 # endif # if HAVE_KQUEUE && HAVE_SYS_EVENT_H # ifndef EV_USE_KQUEUE # define EV_USE_KQUEUE EV_FEATURE_BACKENDS # endif # else # undef EV_USE_KQUEUE # define EV_USE_KQUEUE 0 # endif # if HAVE_PORT_H && HAVE_PORT_CREATE # ifndef EV_USE_PORT # define EV_USE_PORT EV_FEATURE_BACKENDS # endif # else # undef EV_USE_PORT # define EV_USE_PORT 0 # endif # if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H # ifndef EV_USE_INOTIFY # define EV_USE_INOTIFY EV_FEATURE_OS # endif # else # undef EV_USE_INOTIFY # define 
EV_USE_INOTIFY 0 # endif # if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H # ifndef EV_USE_SIGNALFD # define EV_USE_SIGNALFD EV_FEATURE_OS # endif # else # undef EV_USE_SIGNALFD # define EV_USE_SIGNALFD 0 # endif # if HAVE_EVENTFD # ifndef EV_USE_EVENTFD # define EV_USE_EVENTFD EV_FEATURE_OS # endif # else # undef EV_USE_EVENTFD # define EV_USE_EVENTFD 0 # endif #endif #include #include #include #include #include #include #include #include #include #include #include #ifdef EV_H # include EV_H #else # include "ev.h" #endif #if EV_NO_THREADS # undef EV_NO_SMP # define EV_NO_SMP 1 # undef ECB_NO_THREADS # define ECB_NO_THREADS 1 #endif #if EV_NO_SMP # undef EV_NO_SMP # define ECB_NO_SMP 1 #endif #ifndef _WIN32 # include # include # include #else # include # define WIN32_LEAN_AND_MEAN # define FD_SETSIZE 1024 # include # include # ifndef EV_SELECT_IS_WINSOCKET # define EV_SELECT_IS_WINSOCKET 1 # endif # undef EV_AVOID_STDIO #endif /* OS X, in its infinite idiocy, actually HARDCODES * a limit of 1024 into their select. Where people have brains, * OS X engineers apparently have a vacuum. Or maybe they were * ordered to have a vacuum, or they do anything for money. * This might help. Or not. */ #define _DARWIN_UNLIMITED_SELECT 1 /* this block tries to deduce configuration from header-defined symbols and defaults */ /* try to deduce the maximum number of signals on this platform */ #if defined EV_NSIG /* use what's provided */ #elif defined NSIG # define EV_NSIG (NSIG) #elif defined _NSIG # define EV_NSIG (_NSIG) #elif defined SIGMAX # define EV_NSIG (SIGMAX+1) #elif defined SIG_MAX # define EV_NSIG (SIG_MAX+1) #elif defined _SIG_MAX # define EV_NSIG (_SIG_MAX+1) #elif defined MAXSIG # define EV_NSIG (MAXSIG+1) #elif defined MAX_SIG # define EV_NSIG (MAX_SIG+1) #elif defined SIGARRAYSIZE # define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */ #elif defined _sys_nsig # define EV_NSIG (_sys_nsig) /* Solaris 2.5 */ #else # define EV_NSIG (8 * sizeof (sigset_t) + 1) #endif #ifndef EV_USE_FLOOR # define EV_USE_FLOOR 0 #endif #ifndef EV_USE_CLOCK_SYSCALL # if __linux && __GLIBC__ == 2 && __GLIBC_MINOR__ < 17 # define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS # else # define EV_USE_CLOCK_SYSCALL 0 # endif #endif #if !(_POSIX_TIMERS > 0) # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 0 # endif # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif #endif #ifndef EV_USE_MONOTONIC # if defined _POSIX_MONOTONIC_CLOCK && _POSIX_MONOTONIC_CLOCK >= 0 # define EV_USE_MONOTONIC EV_FEATURE_OS # else # define EV_USE_MONOTONIC 0 # endif #endif #ifndef EV_USE_REALTIME # define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL #endif #ifndef EV_USE_NANOSLEEP # if _POSIX_C_SOURCE >= 199309L # define EV_USE_NANOSLEEP EV_FEATURE_OS # else # define EV_USE_NANOSLEEP 0 # endif #endif #ifndef EV_USE_SELECT # define EV_USE_SELECT EV_FEATURE_BACKENDS #endif #ifndef EV_USE_POLL # ifdef _WIN32 # define EV_USE_POLL 0 # else # define EV_USE_POLL EV_FEATURE_BACKENDS # endif #endif #ifndef EV_USE_EPOLL # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) # define EV_USE_EPOLL EV_FEATURE_BACKENDS # else # define EV_USE_EPOLL 0 # endif #endif #ifndef EV_USE_KQUEUE # define EV_USE_KQUEUE 0 #endif #ifndef EV_USE_PORT # define EV_USE_PORT 0 #endif #ifndef EV_USE_INOTIFY # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) # define EV_USE_INOTIFY EV_FEATURE_OS # else # define EV_USE_INOTIFY 0 # endif #endif #ifndef EV_PID_HASHSIZE # define EV_PID_HASHSIZE EV_FEATURE_DATA ? 
16 : 1 #endif #ifndef EV_INOTIFY_HASHSIZE # define EV_INOTIFY_HASHSIZE EV_FEATURE_DATA ? 16 : 1 #endif #ifndef EV_USE_EVENTFD # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) # define EV_USE_EVENTFD EV_FEATURE_OS # else # define EV_USE_EVENTFD 0 # endif #endif #ifndef EV_USE_SIGNALFD # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) # define EV_USE_SIGNALFD EV_FEATURE_OS # else # define EV_USE_SIGNALFD 0 # endif #endif #if 0 /* debugging */ # define EV_VERIFY 3 # define EV_USE_4HEAP 1 # define EV_HEAP_CACHE_AT 1 #endif #ifndef EV_VERIFY # define EV_VERIFY (EV_FEATURE_API ? 1 : 0) #endif #ifndef EV_USE_4HEAP # define EV_USE_4HEAP EV_FEATURE_DATA #endif #ifndef EV_HEAP_CACHE_AT # define EV_HEAP_CACHE_AT EV_FEATURE_DATA #endif #ifdef ANDROID /* supposedly, android doesn't typedef fd_mask */ # undef EV_USE_SELECT # define EV_USE_SELECT 0 /* supposedly, we need to include syscall.h, not sys/syscall.h, so just disable */ # undef EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 0 #endif /* aix's poll.h seems to cause lots of trouble */ #ifdef _AIX /* AIX has a completely broken poll.h header */ # undef EV_USE_POLL # define EV_USE_POLL 0 #endif /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */ /* which makes programs even slower. might work on other unices, too. */ #if EV_USE_CLOCK_SYSCALL # include # ifdef SYS_clock_gettime # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts)) # undef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 1 # else # undef EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 0 # endif #endif /* this block fixes any misconfiguration where we know we run into trouble otherwise */ #ifndef CLOCK_MONOTONIC # undef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 0 #endif #ifndef CLOCK_REALTIME # undef EV_USE_REALTIME # define EV_USE_REALTIME 0 #endif #if !EV_STAT_ENABLE # undef EV_USE_INOTIFY # define EV_USE_INOTIFY 0 #endif #if !EV_USE_NANOSLEEP /* hp-ux has it in sys/time.h, which we unconditionally include above */ # if !defined _WIN32 && !defined __hpux # include # endif #endif #if EV_USE_INOTIFY # include # include /* some very old inotify.h headers don't have IN_DONT_FOLLOW */ # ifndef IN_DONT_FOLLOW # undef EV_USE_INOTIFY # define EV_USE_INOTIFY 0 # endif #endif #if EV_USE_EVENTFD /* our minimum requirement is glibc 2.7 which has the stub, but not the header */ # include # ifndef EFD_NONBLOCK # define EFD_NONBLOCK O_NONBLOCK # endif # ifndef EFD_CLOEXEC # ifdef O_CLOEXEC # define EFD_CLOEXEC O_CLOEXEC # else # define EFD_CLOEXEC 02000000 # endif # endif EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags); #endif #if EV_USE_SIGNALFD /* our minimum requirement is glibc 2.7 which has the stub, but not the header */ # include # ifndef SFD_NONBLOCK # define SFD_NONBLOCK O_NONBLOCK # endif # ifndef SFD_CLOEXEC # ifdef O_CLOEXEC # define SFD_CLOEXEC O_CLOEXEC # else # define SFD_CLOEXEC 02000000 # endif # endif EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags); struct signalfd_siginfo { uint32_t ssi_signo; char pad[128 - sizeof (uint32_t)]; }; #endif /**/ #if EV_VERIFY >= 3 # define EV_FREQUENT_CHECK ev_verify (EV_A) #else # define EV_FREQUENT_CHECK do { } while (0) #endif /* * This is used to work around floating point rounding problems. * This value is good at least till the year 4000. 
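* (1/2**13 is a negative power of two, hence exactly representable in a
* double; the commented-out 1/2**20 value below is the finer-grained but
* shorter-lived alternative.)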
*/ #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ #define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) #define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ /* ECB.H BEGIN */ /* * libecb - http://software.schmorp.de/pkg/libecb * * Copyright (©) 2009-2015 Marc Alexander Lehmann * Copyright (©) 2011 Emanuele Giaquinta * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifndef ECB_H #define ECB_H /* 16 bits major, 16 bits minor */ #define ECB_VERSION 0x00010005 #ifdef _WIN32 typedef signed char int8_t; typedef unsigned char uint8_t; typedef signed short int16_t; typedef unsigned short uint16_t; typedef signed int int32_t; typedef unsigned int uint32_t; #if __GNUC__ typedef signed long long int64_t; typedef unsigned long long uint64_t; #else /* _MSC_VER || __BORLANDC__ */ typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; #endif #ifdef _WIN64 #define ECB_PTRSIZE 8 typedef uint64_t uintptr_t; typedef int64_t intptr_t; #else #define ECB_PTRSIZE 4 typedef uint32_t uintptr_t; typedef int32_t intptr_t; #endif #else #include #if (defined INTPTR_MAX ? 
INTPTR_MAX : ULONG_MAX) > 0xffffffffU #define ECB_PTRSIZE 8 #else #define ECB_PTRSIZE 4 #endif #endif #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__) #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64) /* work around x32 idiocy by defining proper macros */ #if ECB_GCC_AMD64 || ECB_MSVC_AMD64 #if _ILP32 #define ECB_AMD64_X32 1 #else #define ECB_AMD64 1 #endif #endif /* many compilers define _GNUC_ to some versions but then only implement * what their idiot authors think are the "more important" extensions, * causing enormous grief in return for some better fake benchmark numbers. * or so. * we try to detect these and simply assume they are not gcc - if they have * an issue with that they should have done it right in the first place. */ #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__ #define ECB_GCC_VERSION(major,minor) 0 #else #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) #endif #define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor))) #if __clang__ && defined __has_builtin #define ECB_CLANG_BUILTIN(x) __has_builtin (x) #else #define ECB_CLANG_BUILTIN(x) 0 #endif #if __clang__ && defined __has_extension #define ECB_CLANG_EXTENSION(x) __has_extension (x) #else #define ECB_CLANG_EXTENSION(x) 0 #endif #define ECB_CPP (__cplusplus+0) #define ECB_CPP11 (__cplusplus >= 201103L) #if ECB_CPP #define ECB_C 0 #define ECB_STDC_VERSION 0 #else #define ECB_C 1 #define ECB_STDC_VERSION __STDC_VERSION__ #endif #define ECB_C99 (ECB_STDC_VERSION >= 199901L) #define ECB_C11 (ECB_STDC_VERSION >= 201112L) #if ECB_CPP #define ECB_EXTERN_C extern "C" #define ECB_EXTERN_C_BEG ECB_EXTERN_C { #define ECB_EXTERN_C_END } #else #define ECB_EXTERN_C extern #define ECB_EXTERN_C_BEG #define ECB_EXTERN_C_END #endif /*****************************************************************************/ /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ #if ECB_NO_THREADS #define ECB_NO_SMP 1 #endif #if ECB_NO_SMP #define ECB_MEMORY_FENCE do { } while (0) #endif /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */ #if __xlC__ && ECB_CPP #include #endif #if 1400 <= _MSC_VER #include /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */ #endif #ifndef ECB_MEMORY_FENCE #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 #if __i386 || __i386__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") #elif ECB_GCC_AMD64 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") #elif defined __ARM_ARCH_2__ \ || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \ || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \ || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \ || defined __ARM_ARCH_5T__ 
|| defined __ARM_ARCH_5TE__ \ || defined __ARM_ARCH_5TEJ__ /* should not need any, unless running old code on newer cpu - arm doesn't support that */ #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \ || defined __ARM_ARCH_6T2__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") #elif __aarch64__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory") #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8) #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory") #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") #elif defined __s390__ || defined __s390x__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") #elif defined __mips__ /* GNU/Linux emulates sync on mips1 architectures, so we force its use */ /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */ #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory") #elif defined __alpha__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") #elif defined __hppa__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") #elif defined __ia64__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory") #elif defined __m68k__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory") #elif defined __m88k__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory") #elif defined __sh__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory") #endif #endif #endif #ifndef ECB_MEMORY_FENCE #if ECB_GCC_VERSION(4,7) /* see comment below (stdatomic.h) about the C11 memory model. */ #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST) #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE) #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE) #elif ECB_CLANG_EXTENSION(c_atomic) /* see comment below (stdatomic.h) about the C11 memory model. */ #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST) #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE) #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE) #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ #define ECB_MEMORY_FENCE __sync_synchronize () #elif _MSC_VER >= 1500 /* VC++ 2008 */ /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... 
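(hence the definitions below pair the compiler-only _ReadWriteBarrier
intrinsic with a real MemoryBarrier on this compiler, while the
VC++ 2005 branch keeps the bare intrinsics)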
*/ #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier() #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */ #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier() #elif _MSC_VER >= 1400 /* VC++ 2005 */ #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) #define ECB_MEMORY_FENCE _ReadWriteBarrier () #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () #elif defined _WIN32 #include #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */ #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 #include #define ECB_MEMORY_FENCE __machine_rw_barrier () #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier () #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier () #elif __xlC__ #define ECB_MEMORY_FENCE __sync () #endif #endif #ifndef ECB_MEMORY_FENCE #if ECB_C11 && !defined __STDC_NO_ATOMICS__ /* we assume that these memory fences work on all variables/all memory accesses, */ /* not just C11 atomics and atomic accesses */ #include /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */ /* any fence other than seq_cst, which isn't very efficient for us. */ /* Why that is, we don't know - either the C11 memory model is quite useless */ /* for most usages, or gcc and clang have a bug */ /* I *currently* lean towards the latter, and inefficiently implement */ /* all three of ecb's fences as a seq_cst fence */ /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */ /* for all __atomic_thread_fence's except seq_cst */ #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst) #endif #endif #ifndef ECB_MEMORY_FENCE #if !ECB_AVOID_PTHREADS /* * if you get undefined symbol references to pthread_mutex_lock, * or failure to find pthread.h, then you should implement * the ECB_MEMORY_FENCE operations for your cpu/compiler * OR provide pthread.h and link against the posix thread library * of your system. 
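* (the fallback below is correct because POSIX requires mutex lock and
* unlock operations to synchronise memory, so an uncontended
* lock/unlock pair acts as a full, if expensive, memory fence.)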
*/ #include #define ECB_NEEDS_PTHREADS 1 #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1 static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER; #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0) #endif #endif #if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE #endif #if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE #endif /*****************************************************************************/ #if ECB_CPP #define ecb_inline static inline #elif ECB_GCC_VERSION(2,5) #define ecb_inline static __inline__ #elif ECB_C99 #define ecb_inline static inline #else #define ecb_inline static #endif #if ECB_GCC_VERSION(3,3) #define ecb_restrict __restrict__ #elif ECB_C99 #define ecb_restrict restrict #else #define ecb_restrict #endif typedef int ecb_bool; #define ECB_CONCAT_(a, b) a ## b #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b) #define ECB_STRINGIFY_(a) # a #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a) #define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr)) #define ecb_function_ ecb_inline #if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8) #define ecb_attribute(attrlist) __attribute__ (attrlist) #else #define ecb_attribute(attrlist) #endif #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p) #define ecb_is_constant(expr) __builtin_constant_p (expr) #else /* possible C11 impl for integral types typedef struct ecb_is_constant_struct ecb_is_constant_struct; #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */ #define ecb_is_constant(expr) 0 #endif #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect) #define ecb_expect(expr,value) __builtin_expect ((expr),(value)) #else #define ecb_expect(expr,value) (expr) #endif #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch) #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality) #else #define ecb_prefetch(addr,rw,locality) #endif /* no emulation for ecb_decltype */ #if ECB_CPP11 // older implementations might have problems with decltype(x)::type, work around it template struct ecb_decltype_t { typedef T type; }; #define ecb_decltype(x) ecb_decltype_t::type #elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8) #define ecb_decltype(x) __typeof__ (x) #endif #if _MSC_VER >= 1300 #define ecb_deprecated __declspec (deprecated) #else #define ecb_deprecated ecb_attribute ((__deprecated__)) #endif #if _MSC_VER >= 1500 #define ecb_deprecated_message(msg) __declspec (deprecated (msg)) #elif ECB_GCC_VERSION(4,5) #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)) #else #define ecb_deprecated_message(msg) ecb_deprecated #endif #if _MSC_VER >= 1400 #define ecb_noinline __declspec (noinline) #else #define ecb_noinline ecb_attribute ((__noinline__)) #endif #define ecb_unused ecb_attribute ((__unused__)) #define ecb_const ecb_attribute ((__const__)) #define ecb_pure ecb_attribute ((__pure__)) #if ECB_C11 || __IBMC_NORETURN /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */ #define ecb_noreturn _Noreturn #elif ECB_CPP11 #define ecb_noreturn [[noreturn]] #elif _MSC_VER >= 1200 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */ #define ecb_noreturn __declspec (noreturn) #else #define ecb_noreturn ecb_attribute 
((__noreturn__)) #endif #if ECB_GCC_VERSION(4,3) #define ecb_artificial ecb_attribute ((__artificial__)) #define ecb_hot ecb_attribute ((__hot__)) #define ecb_cold ecb_attribute ((__cold__)) #else #define ecb_artificial #define ecb_hot #define ecb_cold #endif /* put around conditional expressions if you are very sure that the */ /* expression is mostly true or mostly false. note that these return */ /* booleans, not the expression. */ #define ecb_expect_false(expr) ecb_expect (!!(expr), 0) #define ecb_expect_true(expr) ecb_expect (!!(expr), 1) /* for compatibility to the rest of the world */ #define ecb_likely(expr) ecb_expect_true (expr) #define ecb_unlikely(expr) ecb_expect_false (expr) /* count trailing zero bits and count # of one bits */ #if ECB_GCC_VERSION(3,4) \ || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \ && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \ && ECB_CLANG_BUILTIN(__builtin_popcount)) /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */ #define ecb_ld32(x) (__builtin_clz (x) ^ 31) #define ecb_ld64(x) (__builtin_clzll (x) ^ 63) #define ecb_ctz32(x) __builtin_ctz (x) #define ecb_ctz64(x) __builtin_ctzll (x) #define ecb_popcount32(x) __builtin_popcount (x) /* no popcountll */ #else ecb_function_ ecb_const int ecb_ctz32 (uint32_t x); ecb_function_ ecb_const int ecb_ctz32 (uint32_t x) { #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanForward (&r, x); return (int)r; #else int r = 0; x &= ~x + 1; /* this isolates the lowest bit */ #if ECB_branchless_on_i386 r += !!(x & 0xaaaaaaaa) << 0; r += !!(x & 0xcccccccc) << 1; r += !!(x & 0xf0f0f0f0) << 2; r += !!(x & 0xff00ff00) << 3; r += !!(x & 0xffff0000) << 4; #else if (x & 0xaaaaaaaa) r += 1; if (x & 0xcccccccc) r += 2; if (x & 0xf0f0f0f0) r += 4; if (x & 0xff00ff00) r += 8; if (x & 0xffff0000) r += 16; #endif return r; #endif } ecb_function_ ecb_const int ecb_ctz64 (uint64_t x); ecb_function_ ecb_const int ecb_ctz64 (uint64_t x) { #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanForward64 (&r, x); return (int)r; #else int shift = x & 0xffffffff ? 
0 : 32; return ecb_ctz32 (x >> shift) + shift; #endif } ecb_function_ ecb_const int ecb_popcount32 (uint32_t x); ecb_function_ ecb_const int ecb_popcount32 (uint32_t x) { x -= (x >> 1) & 0x55555555; x = ((x >> 2) & 0x33333333) + (x & 0x33333333); x = ((x >> 4) + x) & 0x0f0f0f0f; x *= 0x01010101; return x >> 24; } ecb_function_ ecb_const int ecb_ld32 (uint32_t x); ecb_function_ ecb_const int ecb_ld32 (uint32_t x) { #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanReverse (&r, x); return (int)r; #else int r = 0; if (x >> 16) { x >>= 16; r += 16; } if (x >> 8) { x >>= 8; r += 8; } if (x >> 4) { x >>= 4; r += 4; } if (x >> 2) { x >>= 2; r += 2; } if (x >> 1) { r += 1; } return r; #endif } ecb_function_ ecb_const int ecb_ld64 (uint64_t x); ecb_function_ ecb_const int ecb_ld64 (uint64_t x) { #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanReverse64 (&r, x); return (int)r; #else int r = 0; if (x >> 32) { x >>= 32; r += 32; } return r + ecb_ld32 (x); #endif } #endif ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x); ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x); ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); } ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x); ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x) { return ( (x * 0x0802U & 0x22110U) | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16; } ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x); ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x) { x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1); x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2); x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4); x = ( x >> 8 ) | ( x << 8); return x; } ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x); ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x) { x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1); x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2); x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4); x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8); x = ( x >> 16 ) | ( x << 16); return x; } /* popcount64 is only available on 64 bit cpus as gcc builtin */ /* so for this version we are lazy */ ecb_function_ ecb_const int ecb_popcount64 (uint64_t x); ecb_function_ ecb_const int ecb_popcount64 (uint64_t x) { return ecb_popcount32 (x) + ecb_popcount32 (x >> 32); } ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count); ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count); ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count); ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count); ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count); ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count); ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count); ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count); ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); } ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); } ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); } ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> 
count); } ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64)) #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16) #define ecb_bswap16(x) __builtin_bswap16 (x) #else #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) #endif #define ecb_bswap32(x) __builtin_bswap32 (x) #define ecb_bswap64(x) __builtin_bswap64 (x) #elif _MSC_VER #include #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x))) #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x))) #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x))) #else ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x); ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x) { return ecb_rotl16 (x, 8); } ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x); ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x) { return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16); } ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x); ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x) { return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32); } #endif #if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable) #define ecb_unreachable() __builtin_unreachable () #else /* this seems to work fine, but gcc always emits a warning for it :/ */ ecb_inline ecb_noreturn void ecb_unreachable (void); ecb_inline ecb_noreturn void ecb_unreachable (void) { } #endif /* try to tell the compiler that some condition is definitely true */ #define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0 ecb_inline ecb_const uint32_t ecb_byteorder_helper (void); ecb_inline ecb_const uint32_t ecb_byteorder_helper (void) { /* the union code still generates code under pressure in gcc, */ /* but less than using pointers, and always seems to */ /* successfully return a constant. */ /* the reason why we have this horrible preprocessor mess */ /* is to avoid it in all cases, at least on common architectures */ /* or when using a recent enough gcc version (>= 4.6) */ #if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__) #define ECB_LITTLE_ENDIAN 1 return 0x44332211; #elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \ || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__) #define ECB_BIG_ENDIAN 1 return 0x11223344; #else union { uint8_t c[4]; uint32_t u; } u = { 0x11, 0x22, 0x33, 0x44 }; return u.u; #endif } ecb_inline ecb_const ecb_bool ecb_big_endian (void); ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; } ecb_inline ecb_const ecb_bool ecb_little_endian (void); ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; } #if ECB_GCC_VERSION(3,0) || ECB_C99 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) #else #define ecb_mod(m,n) ((m) < 0 ? 
((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) #endif #if ECB_CPP template static inline T ecb_div_rd (T val, T div) { return val < 0 ? - ((-val + div - 1) / div) : (val ) / div; } template static inline T ecb_div_ru (T val, T div) { return val < 0 ? - ((-val ) / div) : (val + div - 1) / div; } #else #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div)) #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div)) #endif #if ecb_cplusplus_does_not_suck /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */ template static inline int ecb_array_length (const T (&arr)[N]) { return N; } #else #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) #endif ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x); ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x) { unsigned int s = (x & 0x8000) << (31 - 15); int e = (x >> 10) & 0x001f; unsigned int m = x & 0x03ff; if (ecb_expect_false (e == 31)) /* infinity or NaN */ e = 255 - (127 - 15); else if (ecb_expect_false (!e)) { if (ecb_expect_true (!m)) /* zero, handled by code below by forcing e to 0 */ e = 0 - (127 - 15); else { /* subnormal, renormalise */ unsigned int s = 10 - ecb_ld32 (m); m = (m << s) & 0x3ff; /* mask implicit bit */ e -= s - 1; } } /* e and m now are normalised, or zero, (or inf or nan) */ e += 127 - 15; return s | (e << 23) | (m << (23 - 10)); } ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x); ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x) { unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */ unsigned int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */ unsigned int m = x & 0x007fffff; x &= 0x7fffffff; /* if it's within range of binary16 normals, use fast path */ if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff)) { /* mantissa round-to-even */ m += 0x00000fff + ((m >> (23 - 10)) & 1); /* handle overflow */ if (ecb_expect_false (m >= 0x00800000)) { m >>= 1; e += 1; } return s | (e << 10) | (m >> (23 - 10)); } /* handle large numbers and infinity */ if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000)) return s | 0x7c00; /* handle zero, subnormals and small numbers */ if (ecb_expect_true (x < 0x38800000)) { /* zero */ if (ecb_expect_true (!x)) return s; /* handle subnormals */ /* too small, will be zero */ if (e < (14 - 24)) /* might not be sharp, but is good enough */ return s; m |= 0x00800000; /* make implicit bit explicit */ /* very tricky - we need to round to the nearest e (+10) bit value */ { unsigned int bits = 14 - e; unsigned int half = (1 << (bits - 1)) - 1; unsigned int even = (m >> bits) & 1; /* if this overflows, we will end up with a normalised number */ m = (m + half + even) >> bits; } return s | m; } /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */ m >>= 13; return s | 0x7c00 | m | !m; } /*******************************************************************************/ /* floating point stuff, can be disabled by defining ECB_NO_LIBM */ /* basically, everything uses "ieee pure-endian" floating point numbers */ /* the only noteworthy exception is ancient armle, which uses order 43218765 */ #if 0 \ || __i386 || __i386__ \ || ECB_GCC_AMD64 \ || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \ || defined __s390__ || defined __s390x__ \ || defined __mips__ \ || defined __alpha__ \ || defined __hppa__ \ || 
defined __ia64__ \ || defined __m68k__ \ || defined __m88k__ \ || defined __sh__ \ || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \ || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \ || defined __aarch64__ #define ECB_STDFP 1 #include /* for memcpy */ #else #define ECB_STDFP 0 #endif #ifndef ECB_NO_LIBM #include /* for frexp*, ldexp*, INFINITY, NAN */ /* only the oldest of old doesn't have this one. solaris. */ #ifdef INFINITY #define ECB_INFINITY INFINITY #else #define ECB_INFINITY HUGE_VAL #endif #ifdef NAN #define ECB_NAN NAN #else #define ECB_NAN ECB_INFINITY #endif #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L #define ecb_ldexpf(x,e) ldexpf ((x), (e)) #define ecb_frexpf(x,e) frexpf ((x), (e)) #else #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e)) #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e)) #endif /* convert a float to ieee single/binary32 */ ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x); ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x) { uint32_t r; #if ECB_STDFP memcpy (&r, &x, 4); #else /* slow emulation, works for anything but -0 */ uint32_t m; int e; if (x == 0e0f ) return 0x00000000U; if (x > +3.40282346638528860e+38f) return 0x7f800000U; if (x < -3.40282346638528860e+38f) return 0xff800000U; if (x != x ) return 0x7fbfffffU; m = ecb_frexpf (x, &e) * 0x1000000U; r = m & 0x80000000U; if (r) m = -m; if (e <= -126) { m &= 0xffffffU; m >>= (-125 - e); e = -126; } r |= (e + 126) << 23; r |= m & 0x7fffffU; #endif return r; } /* converts an ieee single/binary32 to a float */ ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x); ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x) { float r; #if ECB_STDFP memcpy (&r, &x, 4); #else /* emulation, only works for normals and subnormals and +0 */ int neg = x >> 31; int e = (x >> 23) & 0xffU; x &= 0x7fffffU; if (e) x |= 0x800000U; else e = 1; /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */ r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126); r = neg ? -r : r; #endif return r; } /* convert a double to ieee double/binary64 */ ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x); ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x) { uint64_t r; #if ECB_STDFP memcpy (&r, &x, 8); #else /* slow emulation, works for anything but -0 */ uint64_t m; int e; if (x == 0e0 ) return 0x0000000000000000U; if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U; if (x < -1.79769313486231470e+308) return 0xfff0000000000000U; if (x != x ) return 0X7ff7ffffffffffffU; m = frexp (x, &e) * 0x20000000000000U; r = m & 0x8000000000000000;; if (r) m = -m; if (e <= -1022) { m &= 0x1fffffffffffffU; m >>= (-1021 - e); e = -1022; } r |= ((uint64_t)(e + 1022)) << 52; r |= m & 0xfffffffffffffU; #endif return r; } /* converts an ieee double/binary64 to a double */ ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x); ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x) { double r; #if ECB_STDFP memcpy (&r, &x, 8); #else /* emulation, only works for normals and subnormals and +0 */ int neg = x >> 63; int e = (x >> 52) & 0x7ffU; x &= 0xfffffffffffffU; if (e) x |= 0x10000000000000U; else e = 1; /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */ r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022); r = neg ? 
-r : r; #endif return r; } /* convert a float to ieee half/binary16 */ ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x); ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x) { return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x)); } /* convert an ieee half/binary16 to float */ ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x); ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x) { return ecb_binary32_to_float (ecb_binary16_to_binary32 (x)); } #endif #endif /* ECB.H END */ #if ECB_MEMORY_FENCE_NEEDS_PTHREADS /* if your architecture doesn't need memory fences, e.g. because it is * single-cpu/core, or if you use libev in a project that doesn't use libev * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling * libev, in which cases the memory fences become nops. * alternatively, you can remove this #error and link against libpthread, * which will then provide the memory fences. */ # error "memory fences not defined for your architecture, please report" #endif #ifndef ECB_MEMORY_FENCE # define ECB_MEMORY_FENCE do { } while (0) # define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE # define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE #endif #define expect_false(cond) ecb_expect_false (cond) #define expect_true(cond) ecb_expect_true (cond) #define noinline ecb_noinline #define inline_size ecb_inline #if EV_FEATURE_CODE # define inline_speed ecb_inline #else # define inline_speed noinline static #endif #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) #if EV_MINPRI == EV_MAXPRI # define ABSPRI(w) (((W)w), 0) #else # define ABSPRI(w) (((W)w)->priority - EV_MINPRI) #endif #define EMPTY /* required for microsofts broken pseudo-c compiler */ #define EMPTY2(a,b) /* used to suppress some warnings */ typedef ev_watcher *W; typedef ev_watcher_list *WL; typedef ev_watcher_time *WT; #define ev_active(w) ((W)(w))->active #define ev_at(w) ((WT)(w))->at #if EV_USE_REALTIME /* sig_atomic_t is used to avoid per-thread variables or locking but still */ /* giving it a reasonably high chance of working on typical architectures */ static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */ #endif #if EV_USE_MONOTONIC static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */ #endif #ifndef EV_FD_TO_WIN32_HANDLE # define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd) #endif #ifndef EV_WIN32_HANDLE_TO_FD # define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0) #endif #ifndef EV_WIN32_CLOSE_FD # define EV_WIN32_CLOSE_FD(fd) close (fd) #endif #ifdef _WIN32 # include "ev_win32.c" #endif /*****************************************************************************/ /* define a suitable floor function (only used by periodics atm) */ #if EV_USE_FLOOR # include # define ev_floor(v) floor (v) #else #include /* a floor() replacement function, should be independent of ev_tstamp type */ noinline static ev_tstamp ev_floor (ev_tstamp v) { /* the choice of shift factor is not terribly important */ #if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */ const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; #else const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; #endif /* argument too large for an unsigned long? */ if (expect_false (v >= shift)) { ev_tstamp f; if (v == v - 1.) return v; /* very large number */ f = shift * ev_floor (v * (1. / shift)); return f + ev_floor (v - f); } /* special treatment for negative args? 
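(yes: compute -ev_floor (-v), i.e. the ceiling, then subtract one
unless v was already integral)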
*/ if (expect_false (v < 0.)) { ev_tstamp f = -ev_floor (-v); return f - (f == v ? 0 : 1); } /* fits into an unsigned long */ return (unsigned long)v; } #endif /*****************************************************************************/ #ifdef __linux # include #endif noinline ecb_cold static unsigned int ev_linux_version (void) { #ifdef __linux unsigned int v = 0; struct utsname buf; int i; char *p = buf.release; if (uname (&buf)) return 0; for (i = 3+1; --i; ) { unsigned int c = 0; for (;;) { if (*p >= '0' && *p <= '9') c = c * 10 + *p++ - '0'; else { p += *p == '.'; break; } } v = (v << 8) | c; } return v; #else return 0; #endif } /*****************************************************************************/ #if EV_AVOID_STDIO noinline ecb_cold static void ev_printerr (const char *msg) { write (STDERR_FILENO, msg, strlen (msg)); } #endif static void (*syserr_cb)(const char *msg) EV_THROW; ecb_cold void ev_set_syserr_cb (void (*cb)(const char *msg) EV_THROW) EV_THROW { syserr_cb = cb; } noinline ecb_cold static void ev_syserr (const char *msg) { if (!msg) msg = "(libev) system error"; if (syserr_cb) syserr_cb (msg); else { #if EV_AVOID_STDIO ev_printerr (msg); ev_printerr (": "); ev_printerr (strerror (errno)); ev_printerr ("\n"); #else perror (msg); #endif abort (); } } static void * ev_realloc_emul (void *ptr, long size) EV_THROW { /* some systems, notably openbsd and darwin, fail to properly * implement realloc (x, 0) (as required by both ansi c-89 and * the single unix specification, so work around them here. * recently, also (at least) fedora and debian started breaking it, * despite documenting it otherwise. */ if (size) return realloc (ptr, size); free (ptr); return 0; } static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul; ecb_cold void ev_set_allocator (void *(*cb)(void *ptr, long size) EV_THROW) EV_THROW { alloc = cb; } inline_speed void * ev_realloc (void *ptr, long size) { ptr = alloc (ptr, size); if (!ptr && size) { #if EV_AVOID_STDIO ev_printerr ("(libev) memory allocation failed, aborting.\n"); #else fprintf (stderr, "(libev) cannot allocate %ld bytes, aborting.", size); #endif abort (); } return ptr; } #define ev_malloc(size) ev_realloc (0, (size)) #define ev_free(ptr) ev_realloc ((ptr), 0) /*****************************************************************************/ /* set in reify when reification needed */ #define EV_ANFD_REIFY 1 /* file descriptor info structure */ typedef struct { WL head; unsigned char events; /* the events watched for */ unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ unsigned char emask; /* the epoll backend stores the actual kernel mask in here */ unsigned char unused; #if EV_USE_EPOLL unsigned int egen; /* generation counter to counter epoll bugs */ #endif #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP SOCKET handle; #endif #if EV_USE_IOCP OVERLAPPED or, ow; #endif } ANFD; /* stores the pending event set for a given watcher */ typedef struct { W w; int events; /* the pending event set for the given watcher */ } ANPENDING; #if EV_USE_INOTIFY /* hash table entry per inotify-id */ typedef struct { WL head; } ANFS; #endif /* Heap Entry */ #if EV_HEAP_CACHE_AT /* a heap element */ typedef struct { ev_tstamp at; WT w; } ANHE; #define ANHE_w(he) (he).w /* access watcher, read-write */ #define ANHE_at(he) (he).at /* access cached at, read-only */ #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */ #else /* a heap element */ typedef WT ANHE; #define ANHE_w(he) 
(he) #define ANHE_at(he) (he)->at #define ANHE_at_cache(he) #endif #if EV_MULTIPLICITY struct ev_loop { ev_tstamp ev_rt_now; #define ev_rt_now ((loop)->ev_rt_now) #define VAR(name,decl) decl; #include "ev_vars.h" #undef VAR }; #include "ev_wrap.h" static struct ev_loop default_loop_struct; EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */ #else EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */ #define VAR(name,decl) static decl; #include "ev_vars.h" #undef VAR static int ev_default_loop_ptr; #endif #if EV_FEATURE_API # define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A) # define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A) # define EV_INVOKE_PENDING invoke_cb (EV_A) #else # define EV_RELEASE_CB (void)0 # define EV_ACQUIRE_CB (void)0 # define EV_INVOKE_PENDING ev_invoke_pending (EV_A) #endif #define EVBREAK_RECURSE 0x80 /*****************************************************************************/ #ifndef EV_HAVE_EV_TIME ev_tstamp ev_time (void) EV_THROW { #if EV_USE_REALTIME if (expect_true (have_realtime)) { struct timespec ts; clock_gettime (CLOCK_REALTIME, &ts); return ts.tv_sec + ts.tv_nsec * 1e-9; } #endif struct timeval tv; gettimeofday (&tv, 0); return tv.tv_sec + tv.tv_usec * 1e-6; } #endif inline_size ev_tstamp get_clock (void) { #if EV_USE_MONOTONIC if (expect_true (have_monotonic)) { struct timespec ts; clock_gettime (CLOCK_MONOTONIC, &ts); return ts.tv_sec + ts.tv_nsec * 1e-9; } #endif return ev_time (); } #if EV_MULTIPLICITY ev_tstamp ev_now (EV_P) EV_THROW { return ev_rt_now; } #endif void ev_sleep (ev_tstamp delay) EV_THROW { if (delay > 0.) { #if EV_USE_NANOSLEEP struct timespec ts; EV_TS_SET (ts, delay); nanosleep (&ts, 0); #elif defined _WIN32 Sleep ((unsigned long)(delay * 1e3)); #else struct timeval tv; /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ /* something not guaranteed by newer posix versions, but guaranteed */ /* by older ones */ EV_TV_SET (tv, delay); select (0, 0, 0, 0, &tv); #endif } } /*****************************************************************************/ #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */ /* find a suitable new size for the given array, */ /* hopefully by rounding to a nice-to-malloc size */ inline_size int array_nextsize (int elem, int cur, int cnt) { int ncur = cur + 1; do ncur <<= 1; while (cnt > ncur); /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */ if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4) { ncur *= elem; ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1); ncur = ncur - sizeof (void *) * 4; ncur /= elem; } return ncur; } noinline ecb_cold static void * array_realloc (int elem, void *base, int *cur, int cnt) { *cur = array_nextsize (elem, *cur, cnt); return ev_realloc (base, elem * *cur); } #define array_init_zero(base,count) \ memset ((void *)(base), 0, sizeof (*(base)) * (count)) #define array_needsize(type,base,cur,cnt,init) \ if (expect_false ((cnt) > (cur))) \ { \ ecb_unused int ocur_ = (cur); \ (base) = (type *)array_realloc \ (sizeof (type), (base), &(cur), (cnt)); \ init ((base) + (ocur_), (cur) - ocur_); \ } #if 0 #define array_slim(type,stem) \ if (stem ## max < array_roundsize (stem ## cnt >> 2)) \ { \ stem ## max = array_roundsize (stem ## cnt >> 1); \ base = (type *)ev_realloc (base, 
sizeof (type) * (stem ## max));\ fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\ } #endif #define array_free(stem, idx) \ ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0 /*****************************************************************************/ /* dummy callback for pending events */ noinline static void pendingcb (EV_P_ ev_prepare *w, int revents) { } noinline void ev_feed_event (EV_P_ void *w, int revents) EV_THROW { W w_ = (W)w; int pri = ABSPRI (w_); if (expect_false (w_->pending)) pendings [pri][w_->pending - 1].events |= revents; else { w_->pending = ++pendingcnt [pri]; array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2); pendings [pri][w_->pending - 1].w = w_; pendings [pri][w_->pending - 1].events = revents; } pendingpri = NUMPRI - 1; } inline_speed void feed_reverse (EV_P_ W w) { array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, EMPTY2); rfeeds [rfeedcnt++] = w; } inline_size void feed_reverse_done (EV_P_ int revents) { do ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents); while (rfeedcnt); } inline_speed void queue_events (EV_P_ W *events, int eventcnt, int type) { int i; for (i = 0; i < eventcnt; ++i) ev_feed_event (EV_A_ events [i], type); } /*****************************************************************************/ inline_speed void fd_event_nocheck (EV_P_ int fd, int revents) { ANFD *anfd = anfds + fd; ev_io *w; for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next) { int ev = w->events & revents; if (ev) ev_feed_event (EV_A_ (W)w, ev); } } /* do not submit kernel events for fds that have reify set */ /* because that means they changed while we were polling for new events */ inline_speed void fd_event (EV_P_ int fd, int revents) { ANFD *anfd = anfds + fd; if (expect_true (!anfd->reify)) fd_event_nocheck (EV_A_ fd, revents); } void ev_feed_fd_event (EV_P_ int fd, int revents) EV_THROW { if (fd >= 0 && fd < anfdmax) fd_event_nocheck (EV_A_ fd, revents); } /* make sure the external fd watch events are in-sync */ /* with the kernel/libev internal state */ inline_size void fd_reify (EV_P) { int i; #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP for (i = 0; i < fdchangecnt; ++i) { int fd = fdchanges [i]; ANFD *anfd = anfds + fd; if (anfd->reify & EV__IOFDSET && anfd->head) { SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd); if (handle != anfd->handle) { unsigned long arg; assert (("libev: only socket fds supported in this configuration", ioctlsocket (handle, FIONREAD, &arg) == 0)); /* handle changed, but fd didn't - we need to do it in two steps */ backend_modify (EV_A_ fd, anfd->events, 0); anfd->events = 0; anfd->handle = handle; } } } #endif for (i = 0; i < fdchangecnt; ++i) { int fd = fdchanges [i]; ANFD *anfd = anfds + fd; ev_io *w; unsigned char o_events = anfd->events; unsigned char o_reify = anfd->reify; anfd->reify = 0; /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */ { anfd->events = 0; for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next) anfd->events |= (unsigned char)w->events; if (o_events != anfd->events) o_reify = EV__IOFDSET; /* actually |= */ } if (o_reify & EV__IOFDSET) backend_modify (EV_A_ fd, o_events, anfd->events); } fdchangecnt = 0; } /* something about the given fd changed */ inline_size void fd_change (EV_P_ int fd, int flags) { unsigned char reify = anfds [fd].reify; anfds [fd].reify |= flags; if (expect_true (!reify)) { ++fdchangecnt; array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2); fdchanges [fdchangecnt - 1] = 
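/* --------------------------------------------------------------------------
 * Illustrative sketch, not part of libev and guarded out by #if 0:
 * fd_change() above grows the fdchanges list with array_needsize(), the
 * same pattern every dynamic array in this file uses. Capacity doubles
 * via array_nextsize() until the requested count fits, then is rounded to
 * a malloc-friendly chunk, so a push loop reallocates only O(log n) times.
 * The my_* names below are hypothetical.
 * ------------------------------------------------------------------------ */
#if 0
static int *my_elems;       /* hypothetical growable int array */
static int  my_cnt, my_max; /* used count and allocated capacity */

static void
my_push (int v)
{
  ++my_cnt;
  /* reallocates only when my_cnt exceeds my_max */
  array_needsize (int, my_elems, my_max, my_cnt, EMPTY2);
  my_elems [my_cnt - 1] = v;
}
#endif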
fd; } } /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */ inline_speed ecb_cold void fd_kill (EV_P_ int fd) { ev_io *w; while ((w = (ev_io *)anfds [fd].head)) { ev_io_stop (EV_A_ w); ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); } } /* check whether the given fd is actually valid, for error recovery */ inline_size ecb_cold int fd_valid (int fd) { #ifdef _WIN32 return EV_FD_TO_WIN32_HANDLE (fd) != -1; #else return fcntl (fd, F_GETFD) != -1; #endif } /* called on EBADF to verify fds */ noinline ecb_cold static void fd_ebadf (EV_P) { int fd; for (fd = 0; fd < anfdmax; ++fd) if (anfds [fd].events) if (!fd_valid (fd) && errno == EBADF) fd_kill (EV_A_ fd); } /* called on ENOMEM in select/poll to kill some fds and retry */ noinline ecb_cold static void fd_enomem (EV_P) { int fd; for (fd = anfdmax; fd--; ) if (anfds [fd].events) { fd_kill (EV_A_ fd); break; } } /* usually called after fork if backend needs to re-arm all fds from scratch */ noinline static void fd_rearm_all (EV_P) { int fd; for (fd = 0; fd < anfdmax; ++fd) if (anfds [fd].events) { anfds [fd].events = 0; anfds [fd].emask = 0; fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY); } } /* used to prepare libev internal fd's */ /* this is not fork-safe */ inline_speed void fd_intern (int fd) { #ifdef _WIN32 unsigned long arg = 1; ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg); #else fcntl (fd, F_SETFD, FD_CLOEXEC); fcntl (fd, F_SETFL, O_NONBLOCK); #endif } /*****************************************************************************/ /* * the heap functions want a real array index. array index 0 is guaranteed to not * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives * the branching factor of the d-tree. */ /* * at the moment we allow libev the luxury of two heaps, * a small-code-size 2-heap one and a ~1.5kb larger 4-heap * which is more cache-efficient. * the difference is about 5% with 50000+ watchers. 
*/

#if EV_USE_4HEAP

#define DHEAP 4
#define HEAP0 (DHEAP - 1) /* index of first element in heap */
#define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)
#define UPHEAP_DONE(p,k) ((p) == (k))

/* away from the root */
inline_speed void
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k];
  ANHE *E = heap + N + HEAP0;

  for (;;)
    {
      ev_tstamp minat;
      ANHE *minpos;
      ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;

      /* find minimum child */
      if (expect_true (pos + DHEAP - 1 < E))
        {
          /* fast path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else if (pos < E)
        {
          /* slow path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else
        break;

      if (ANHE_at (he) <= minat)
        break;

      heap [k] = *minpos;
      ev_active (ANHE_w (*minpos)) = k;

      k = minpos - heap;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}

#else /* 4HEAP */

#define HEAP0 1
#define HPARENT(k) ((k) >> 1)
#define UPHEAP_DONE(p,k) (!(p))

/* away from the root */
inline_speed void
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k];

  for (;;)
    {
      int c = k << 1;

      if (c >= N + HEAP0)
        break;

      c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1]) ? 1 : 0;

      if (ANHE_at (he) <= ANHE_at (heap [c]))
        break;

      heap [k] = heap [c];
      ev_active (ANHE_w (heap [k])) = k;

      k = c;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}

#endif

/* towards the root */
inline_speed void
upheap (ANHE *heap, int k)
{
  ANHE he = heap [k];

  for (;;)
    {
      int p = HPARENT (k);

      if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he))
        break;

      heap [k] = heap [p];
      ev_active (ANHE_w (heap [k])) = k;
      k = p;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}

/* move an element suitably so it is in a correct place */
inline_size void
adjustheap (ANHE *heap, int N, int k)
{
  if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)]))
    upheap (heap, k);
  else
    downheap (heap, N, k);
}

/* rebuild the heap: this function is used only once and executed rarely */
inline_size void
reheap (ANHE *heap, int N)
{
  int i;

  /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */
  /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */
  for (i = 0; i < N; ++i)
    upheap (heap, i + HEAP0);
}

/*****************************************************************************/

/* associate signal watchers to a signal */
typedef struct
{
  EV_ATOMIC_T pending;
#if EV_MULTIPLICITY
  EV_P;
#endif
  WL head;
} ANSIG;

static ANSIG signals [EV_NSIG - 1];

/*****************************************************************************/

#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE

noinline ecb_cold
static void
evpipe_init (EV_P)
{
  if (!ev_is_active (&pipe_w))
    {
      int fds [2];

# if EV_USE_EVENTFD
      fds [0] = -1;
      fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
      if (fds [1] < 0 && errno == EINVAL)
        fds [1] = eventfd (0, 0);

      if (fds [1] < 0)
# endif
        {
          while (pipe (fds))
            ev_syserr ("(libev) error creating signal/async pipe");

          fd_intern (fds [0]);
        }

      evpipe [0] = fds [0];

      if (evpipe [1] < 0)
        evpipe [1] = fds [1]; /* first call, set write fd */
      else
        {
          /*
on subsequent calls, do not change evpipe [1] */ /* so that evpipe_write can always rely on its value. */ /* this branch does not do anything sensible on windows, */ /* so must not be executed on windows */ dup2 (fds [1], evpipe [1]); close (fds [1]); } fd_intern (evpipe [1]); ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ); ev_io_start (EV_A_ &pipe_w); ev_unref (EV_A); /* watcher should not keep loop alive */ } } inline_speed void evpipe_write (EV_P_ EV_ATOMIC_T *flag) { ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */ if (expect_true (*flag)) return; *flag = 1; ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ pipe_write_skipped = 1; ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */ if (pipe_write_wanted) { int old_errno; pipe_write_skipped = 0; ECB_MEMORY_FENCE_RELEASE; old_errno = errno; /* save errno because write will clobber it */ #if EV_USE_EVENTFD if (evpipe [0] < 0) { uint64_t counter = 1; write (evpipe [1], &counter, sizeof (uint64_t)); } else #endif { #ifdef _WIN32 WSABUF buf; DWORD sent; buf.buf = (char *)&buf; buf.len = 1; WSASend (EV_FD_TO_WIN32_HANDLE (evpipe [1]), &buf, 1, &sent, 0, 0, 0); #else write (evpipe [1], &(evpipe [1]), 1); #endif } errno = old_errno; } } /* called whenever the libev signal pipe */ /* got some events (signal, async) */ static void pipecb (EV_P_ ev_io *iow, int revents) { int i; if (revents & EV_READ) { #if EV_USE_EVENTFD if (evpipe [0] < 0) { uint64_t counter; read (evpipe [1], &counter, sizeof (uint64_t)); } else #endif { char dummy[4]; #ifdef _WIN32 WSABUF buf; DWORD recvd; DWORD flags = 0; buf.buf = dummy; buf.len = sizeof (dummy); WSARecv (EV_FD_TO_WIN32_HANDLE (evpipe [0]), &buf, 1, &recvd, &flags, 0, 0); #else read (evpipe [0], &dummy, sizeof (dummy)); #endif } } pipe_write_skipped = 0; ECB_MEMORY_FENCE; /* push out skipped, acquire flags */ #if EV_SIGNAL_ENABLE if (sig_pending) { sig_pending = 0; ECB_MEMORY_FENCE; for (i = EV_NSIG - 1; i--; ) if (expect_false (signals [i].pending)) ev_feed_signal_event (EV_A_ i + 1); } #endif #if EV_ASYNC_ENABLE if (async_pending) { async_pending = 0; ECB_MEMORY_FENCE; for (i = asynccnt; i--; ) if (asyncs [i]->sent) { asyncs [i]->sent = 0; ECB_MEMORY_FENCE_RELEASE; ev_feed_event (EV_A_ asyncs [i], EV_ASYNC); } } #endif } /*****************************************************************************/ void ev_feed_signal (int signum) EV_THROW { #if EV_MULTIPLICITY EV_P; ECB_MEMORY_FENCE_ACQUIRE; EV_A = signals [signum - 1].loop; if (!EV_A) return; #endif signals [signum - 1].pending = 1; evpipe_write (EV_A_ &sig_pending); } static void ev_sighandler (int signum) { #ifdef _WIN32 signal (signum, ev_sighandler); #endif ev_feed_signal (signum); } noinline void ev_feed_signal_event (EV_P_ int signum) EV_THROW { WL w; if (expect_false (signum <= 0 || signum >= EV_NSIG)) return; --signum; #if EV_MULTIPLICITY /* it is permissible to try to feed a signal to the wrong loop */ /* or, likely more useful, feeding a signal nobody is waiting for */ if (expect_false (signals [signum].loop != EV_A)) return; #endif signals [signum].pending = 0; ECB_MEMORY_FENCE_RELEASE; for (w = signals [signum].head; w; w = w->next) ev_feed_event (EV_A_ (W)w, EV_SIGNAL); } #if EV_USE_SIGNALFD static void sigfdcb (EV_P_ ev_io *iow, int revents) { struct signalfd_siginfo si[2], *sip; /* these structs are big */ for (;;) { ssize_t res = read (sigfd, si, sizeof (si)); /* not ISO-C, as res might be -1, but works 
with SuS */ for (sip = si; (char *)sip < (char *)si + res; ++sip) ev_feed_signal_event (EV_A_ sip->ssi_signo); if (res < (ssize_t)sizeof (si)) break; } } #endif #endif /*****************************************************************************/ #if EV_CHILD_ENABLE static WL childs [EV_PID_HASHSIZE]; static ev_signal childev; #ifndef WIFCONTINUED # define WIFCONTINUED(status) 0 #endif /* handle a single child status event */ inline_speed void child_reap (EV_P_ int chain, int pid, int status) { ev_child *w; int traced = WIFSTOPPED (status) || WIFCONTINUED (status); for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next) { if ((w->pid == pid || !w->pid) && (!traced || (w->flags & 1))) { ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */ w->rpid = pid; w->rstatus = status; ev_feed_event (EV_A_ (W)w, EV_CHILD); } } } #ifndef WCONTINUED # define WCONTINUED 0 #endif /* called on sigchld etc., calls waitpid */ static void childcb (EV_P_ ev_signal *sw, int revents) { int pid, status; /* some systems define WCONTINUED but then fail to support it (linux 2.4) */ if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) if (!WCONTINUED || errno != EINVAL || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED))) return; /* make sure we are called again until all children have been reaped */ /* we need to do it this way so that the callback gets called before we continue */ ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); child_reap (EV_A_ pid, pid, status); if ((EV_PID_HASHSIZE) > 1) child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */ } #endif /*****************************************************************************/ #if EV_USE_IOCP # include "ev_iocp.c" #endif #if EV_USE_PORT # include "ev_port.c" #endif #if EV_USE_KQUEUE # include "ev_kqueue.c" #endif #if EV_USE_EPOLL # include "ev_epoll.c" #endif #if EV_USE_POLL # include "ev_poll.c" #endif #if EV_USE_SELECT # include "ev_select.c" #endif ecb_cold int ev_version_major (void) EV_THROW { return EV_VERSION_MAJOR; } ecb_cold int ev_version_minor (void) EV_THROW { return EV_VERSION_MINOR; } /* return true if we are running with elevated privileges and should ignore env variables */ inline_size ecb_cold int enable_secure (void) { #ifdef _WIN32 return 0; #else return getuid () != geteuid () || getgid () != getegid (); #endif } ecb_cold unsigned int ev_supported_backends (void) EV_THROW { unsigned int flags = 0; if (EV_USE_PORT ) flags |= EVBACKEND_PORT; if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE; if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; if (EV_USE_POLL ) flags |= EVBACKEND_POLL; if (EV_USE_SELECT) flags |= EVBACKEND_SELECT; return flags; } ecb_cold unsigned int ev_recommended_backends (void) EV_THROW { unsigned int flags = ev_supported_backends (); #ifndef __NetBSD__ /* kqueue is borked on everything but netbsd apparently */ /* it usually doesn't work correctly on anything but sockets and pipes */ flags &= ~EVBACKEND_KQUEUE; #endif #ifdef __APPLE__ /* only select works correctly on that "unix-certified" platform */ flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */ flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */ #endif #ifdef __FreeBSD__ flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */ #endif return flags; } ecb_cold unsigned int ev_embeddable_backends (void) 
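/* --------------------------------------------------------------------------
 * Illustrative sketch, not part of libev and guarded out by #if 0: how the
 * two backend queries above differ. ev_supported_backends() reports
 * everything compiled in, while ev_recommended_backends() additionally
 * masks out backends known to misbehave on the current platform (kqueue
 * anywhere but NetBSD, poll on FreeBSD and macOS).
 * ------------------------------------------------------------------------ */
#if 0
unsigned int sup = ev_supported_backends ();
unsigned int rec = ev_recommended_backends ();

/* e.g. on macOS, (sup & EVBACKEND_KQUEUE) may be set while
 * (rec & EVBACKEND_KQUEUE) is not; when no backend flags are passed to
 * loop initialisation, the recommended set is used, so broken backends
 * are avoided unless explicitly requested. */
#endif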
EV_THROW { int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ flags &= ~EVBACKEND_EPOLL; return flags; } unsigned int ev_backend (EV_P) EV_THROW { return backend; } #if EV_FEATURE_API unsigned int ev_iteration (EV_P) EV_THROW { return loop_count; } unsigned int ev_depth (EV_P) EV_THROW { return loop_depth; } void ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_THROW { io_blocktime = interval; } void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_THROW { timeout_blocktime = interval; } void ev_set_userdata (EV_P_ void *data) EV_THROW { userdata = data; } void * ev_userdata (EV_P) EV_THROW { return userdata; } void ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_THROW { invoke_cb = invoke_pending_cb; } void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_THROW, void (*acquire)(EV_P) EV_THROW) EV_THROW { release_cb = release; acquire_cb = acquire; } #endif /* initialise a loop structure, must be zero-initialised */ noinline ecb_cold static void loop_init (EV_P_ unsigned int flags) EV_THROW { if (!backend) { origflags = flags; #if EV_USE_REALTIME if (!have_realtime) { struct timespec ts; if (!clock_gettime (CLOCK_REALTIME, &ts)) have_realtime = 1; } #endif #if EV_USE_MONOTONIC if (!have_monotonic) { struct timespec ts; if (!clock_gettime (CLOCK_MONOTONIC, &ts)) have_monotonic = 1; } #endif /* pid check not overridable via env */ #ifndef _WIN32 if (flags & EVFLAG_FORKCHECK) curpid = getpid (); #endif if (!(flags & EVFLAG_NOENV) && !enable_secure () && getenv ("LIBEV_FLAGS")) flags = atoi (getenv ("LIBEV_FLAGS")); ev_rt_now = ev_time (); mn_now = get_clock (); now_floor = mn_now; rtmn_diff = ev_rt_now - mn_now; #if EV_FEATURE_API invoke_cb = ev_invoke_pending; #endif io_blocktime = 0.; timeout_blocktime = 0.; backend = 0; backend_fd = -1; sig_pending = 0; #if EV_ASYNC_ENABLE async_pending = 0; #endif pipe_write_skipped = 0; pipe_write_wanted = 0; evpipe [0] = -1; evpipe [1] = -1; #if EV_USE_INOTIFY fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; #endif #if EV_USE_SIGNALFD sigfd = flags & EVFLAG_SIGNALFD ? 
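/* --------------------------------------------------------------------------
 * Illustrative sketch, not part of libev and guarded out by #if 0:
 * loop_init() above lets the LIBEV_FLAGS environment variable override the
 * flags argument unless EVFLAG_NOENV is given or the process runs with
 * elevated privileges (enable_secure()). Forcing a backend
 * programmatically, with the environment override disabled, looks like:
 * ------------------------------------------------------------------------ */
#if 0
struct ev_loop *loop = ev_loop_new (EVBACKEND_SELECT | EVFLAG_NOENV);

if (!loop)
  abort (); /* ev_loop_new returns 0 when no requested backend initialised */
#endif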
-2 : -1; #endif if (!(flags & EVBACKEND_MASK)) flags |= ev_recommended_backends (); #if EV_USE_IOCP if (!backend && (flags & EVBACKEND_IOCP )) backend = iocp_init (EV_A_ flags); #endif #if EV_USE_PORT if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); #endif #if EV_USE_KQUEUE if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags); #endif #if EV_USE_EPOLL if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags); #endif #if EV_USE_POLL if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags); #endif #if EV_USE_SELECT if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags); #endif ev_prepare_init (&pending_w, pendingcb); #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE ev_init (&pipe_w, pipecb); ev_set_priority (&pipe_w, EV_MAXPRI); #endif } } /* free up a loop structure */ ecb_cold void ev_loop_destroy (EV_P) { int i; #if EV_MULTIPLICITY /* mimic free (0) */ if (!EV_A) return; #endif #if EV_CLEANUP_ENABLE /* queue cleanup watchers (and execute them) */ if (expect_false (cleanupcnt)) { queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP); EV_INVOKE_PENDING; } #endif #if EV_CHILD_ENABLE if (ev_is_default_loop (EV_A) && ev_is_active (&childev)) { ev_ref (EV_A); /* child watcher */ ev_signal_stop (EV_A_ &childev); } #endif if (ev_is_active (&pipe_w)) { /*ev_ref (EV_A);*/ /*ev_io_stop (EV_A_ &pipe_w);*/ if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]); if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]); } #if EV_USE_SIGNALFD if (ev_is_active (&sigfd_w)) close (sigfd); #endif #if EV_USE_INOTIFY if (fs_fd >= 0) close (fs_fd); #endif if (backend_fd >= 0) close (backend_fd); #if EV_USE_IOCP if (backend == EVBACKEND_IOCP ) iocp_destroy (EV_A); #endif #if EV_USE_PORT if (backend == EVBACKEND_PORT ) port_destroy (EV_A); #endif #if EV_USE_KQUEUE if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A); #endif #if EV_USE_EPOLL if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A); #endif #if EV_USE_POLL if (backend == EVBACKEND_POLL ) poll_destroy (EV_A); #endif #if EV_USE_SELECT if (backend == EVBACKEND_SELECT) select_destroy (EV_A); #endif for (i = NUMPRI; i--; ) { array_free (pending, [i]); #if EV_IDLE_ENABLE array_free (idle, [i]); #endif } ev_free (anfds); anfds = 0; anfdmax = 0; /* have to use the microsoft-never-gets-it-right macro */ array_free (rfeed, EMPTY); array_free (fdchange, EMPTY); array_free (timer, EMPTY); #if EV_PERIODIC_ENABLE array_free (periodic, EMPTY); #endif #if EV_FORK_ENABLE array_free (fork, EMPTY); #endif #if EV_CLEANUP_ENABLE array_free (cleanup, EMPTY); #endif array_free (prepare, EMPTY); array_free (check, EMPTY); #if EV_ASYNC_ENABLE array_free (async, EMPTY); #endif backend = 0; #if EV_MULTIPLICITY if (ev_is_default_loop (EV_A)) #endif ev_default_loop_ptr = 0; #if EV_MULTIPLICITY else ev_free (EV_A); #endif } #if EV_USE_INOTIFY inline_size void infy_fork (EV_P); #endif inline_size void loop_fork (EV_P) { #if EV_USE_PORT if (backend == EVBACKEND_PORT ) port_fork (EV_A); #endif #if EV_USE_KQUEUE if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A); #endif #if EV_USE_EPOLL if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); #endif #if EV_USE_INOTIFY infy_fork (EV_A); #endif #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE if (ev_is_active (&pipe_w) && postfork != 2) { /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ ev_ref (EV_A); ev_io_stop (EV_A_ &pipe_w); if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]); evpipe_init (EV_A); /* iterate 
over everything, in case we missed something before */ ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); } #endif postfork = 0; } #if EV_MULTIPLICITY ecb_cold struct ev_loop * ev_loop_new (unsigned int flags) EV_THROW { EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop)); memset (EV_A, 0, sizeof (struct ev_loop)); loop_init (EV_A_ flags); if (ev_backend (EV_A)) return EV_A; ev_free (EV_A); return 0; } #endif /* multiplicity */ #if EV_VERIFY noinline ecb_cold static void verify_watcher (EV_P_ W w) { assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI)); if (w->pending) assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w)); } noinline ecb_cold static void verify_heap (EV_P_ ANHE *heap, int N) { int i; for (i = HEAP0; i < N + HEAP0; ++i) { assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i)); assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i]))); assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i])))); verify_watcher (EV_A_ (W)ANHE_w (heap [i])); } } noinline ecb_cold static void array_verify (EV_P_ W *ws, int cnt) { while (cnt--) { assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1)); verify_watcher (EV_A_ ws [cnt]); } } #endif #if EV_FEATURE_API void ecb_cold ev_verify (EV_P) EV_THROW { #if EV_VERIFY int i; WL w, w2; assert (activecnt >= -1); assert (fdchangemax >= fdchangecnt); for (i = 0; i < fdchangecnt; ++i) assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0)); assert (anfdmax >= 0); for (i = 0; i < anfdmax; ++i) { int j = 0; for (w = w2 = anfds [i].head; w; w = w->next) { verify_watcher (EV_A_ (W)w); if (j++ & 1) { assert (("libev: io watcher list contains a loop", w != w2)); w2 = w2->next; } assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1)); assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i)); } } assert (timermax >= timercnt); verify_heap (EV_A_ timers, timercnt); #if EV_PERIODIC_ENABLE assert (periodicmax >= periodiccnt); verify_heap (EV_A_ periodics, periodiccnt); #endif for (i = NUMPRI; i--; ) { assert (pendingmax [i] >= pendingcnt [i]); #if EV_IDLE_ENABLE assert (idleall >= 0); assert (idlemax [i] >= idlecnt [i]); array_verify (EV_A_ (W *)idles [i], idlecnt [i]); #endif } #if EV_FORK_ENABLE assert (forkmax >= forkcnt); array_verify (EV_A_ (W *)forks, forkcnt); #endif #if EV_CLEANUP_ENABLE assert (cleanupmax >= cleanupcnt); array_verify (EV_A_ (W *)cleanups, cleanupcnt); #endif #if EV_ASYNC_ENABLE assert (asyncmax >= asynccnt); array_verify (EV_A_ (W *)asyncs, asynccnt); #endif #if EV_PREPARE_ENABLE assert (preparemax >= preparecnt); array_verify (EV_A_ (W *)prepares, preparecnt); #endif #if EV_CHECK_ENABLE assert (checkmax >= checkcnt); array_verify (EV_A_ (W *)checks, checkcnt); #endif # if 0 #if EV_CHILD_ENABLE for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next) for (signum = EV_NSIG; signum--; ) if (signals [signum].pending) #endif # endif #endif } #endif #if EV_MULTIPLICITY ecb_cold struct ev_loop * #else int #endif ev_default_loop (unsigned int flags) EV_THROW { if (!ev_default_loop_ptr) { #if EV_MULTIPLICITY EV_P = ev_default_loop_ptr = &default_loop_struct; #else ev_default_loop_ptr = 1; #endif loop_init (EV_A_ flags); if (ev_backend (EV_A)) { #if EV_CHILD_ENABLE ev_signal_init (&childev, childcb, SIGCHLD); ev_set_priority (&childev, 
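/* --------------------------------------------------------------------------
 * Illustrative sketch, not part of libev and guarded out by #if 0: the
 * childev signal watcher being wired up here is what makes ev_child
 * watchers work, which is why they are only supported on the default
 * loop. my_child_cb and some_pid are hypothetical names.
 * ------------------------------------------------------------------------ */
#if 0
static void
my_child_cb (EV_P_ ev_child *w, int revents)
{
  /* rpid/rstatus were filled in by child_reap() before the callback ran */
  printf ("pid %d exited, status %04x\n", w->rpid, w->rstatus);
  ev_child_stop (EV_A_ w);
}

/* ev_child cw;
 * ev_child_init (&cw, my_child_cb, some_pid, 0);  pid 0 watches any child
 * ev_child_start (EV_DEFAULT_ &cw); */
#endif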
EV_MAXPRI); ev_signal_start (EV_A_ &childev); ev_unref (EV_A); /* child watcher should not keep loop alive */ #endif } else ev_default_loop_ptr = 0; } return ev_default_loop_ptr; } void ev_loop_fork (EV_P) EV_THROW { postfork = 1; } /*****************************************************************************/ void ev_invoke (EV_P_ void *w, int revents) { EV_CB_INVOKE ((W)w, revents); } unsigned int ev_pending_count (EV_P) EV_THROW { int pri; unsigned int count = 0; for (pri = NUMPRI; pri--; ) count += pendingcnt [pri]; return count; } noinline void ev_invoke_pending (EV_P) { pendingpri = NUMPRI; while (pendingpri) /* pendingpri possibly gets modified in the inner loop */ { --pendingpri; while (pendingcnt [pendingpri]) { ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri]; p->w->pending = 0; EV_CB_INVOKE (p->w, p->events); EV_FREQUENT_CHECK; } } } #if EV_IDLE_ENABLE /* make idle watchers pending. this handles the "call-idle */ /* only when higher priorities are idle" logic */ inline_size void idle_reify (EV_P) { if (expect_false (idleall)) { int pri; for (pri = NUMPRI; pri--; ) { if (pendingcnt [pri]) break; if (idlecnt [pri]) { queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE); break; } } } } #endif /* make timers pending */ inline_size void timers_reify (EV_P) { EV_FREQUENT_CHECK; if (timercnt && ANHE_at (timers [HEAP0]) < mn_now) { do { ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]); /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/ /* first reschedule or stop timer */ if (w->repeat) { ev_at (w) += w->repeat; if (ev_at (w) < mn_now) ev_at (w) = mn_now; assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.)); ANHE_at_cache (timers [HEAP0]); downheap (timers, timercnt, HEAP0); } else ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */ EV_FREQUENT_CHECK; feed_reverse (EV_A_ (W)w); } while (timercnt && ANHE_at (timers [HEAP0]) < mn_now); feed_reverse_done (EV_A_ EV_TIMER); } } #if EV_PERIODIC_ENABLE noinline static void periodic_recalc (EV_P_ ev_periodic *w) { ev_tstamp interval = w->interval > MIN_INTERVAL ? 
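/* --------------------------------------------------------------------------
 * Illustrative sketch, not part of libev and guarded out by #if 0:
 * timers_reify() above implements repeat semantics by pushing ev_at(w)
 * forward by w->repeat (clamped to mn_now), so the watcher below fires
 * first after 1.5s and then every 2s. my_timeout_cb is hypothetical.
 * ------------------------------------------------------------------------ */
#if 0
static void
my_timeout_cb (EV_P_ ev_timer *w, int revents)
{
  /* calling ev_timer_stop (EV_A_ w) here would make the timer one-shot */
}

/* ev_timer t;
 * ev_timer_init (&t, my_timeout_cb, 1.5, 2.);
 * ev_timer_start (EV_A_ &t); */
#endif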
w->interval : MIN_INTERVAL; ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval); /* the above almost always errs on the low side */ while (at <= ev_rt_now) { ev_tstamp nat = at + w->interval; /* when resolution fails us, we use ev_rt_now */ if (expect_false (nat == at)) { at = ev_rt_now; break; } at = nat; } ev_at (w) = at; } /* make periodics pending */ inline_size void periodics_reify (EV_P) { EV_FREQUENT_CHECK; while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now) { do { ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]); /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/ /* first reschedule or stop timer */ if (w->reschedule_cb) { ev_at (w) = w->reschedule_cb (w, ev_rt_now); assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now)); ANHE_at_cache (periodics [HEAP0]); downheap (periodics, periodiccnt, HEAP0); } else if (w->interval) { periodic_recalc (EV_A_ w); ANHE_at_cache (periodics [HEAP0]); downheap (periodics, periodiccnt, HEAP0); } else ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */ EV_FREQUENT_CHECK; feed_reverse (EV_A_ (W)w); } while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now); feed_reverse_done (EV_A_ EV_PERIODIC); } } /* simply recalculate all periodics */ /* TODO: maybe ensure that at least one event happens when jumping forward? */ noinline ecb_cold static void periodics_reschedule (EV_P) { int i; /* adjust periodics after time jump */ for (i = HEAP0; i < periodiccnt + HEAP0; ++i) { ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]); if (w->reschedule_cb) ev_at (w) = w->reschedule_cb (w, ev_rt_now); else if (w->interval) periodic_recalc (EV_A_ w); ANHE_at_cache (periodics [i]); } reheap (periodics, periodiccnt); } #endif /* adjust all timers by a given offset */ noinline ecb_cold static void timers_reschedule (EV_P_ ev_tstamp adjust) { int i; for (i = 0; i < timercnt; ++i) { ANHE *he = timers + i + HEAP0; ANHE_w (*he)->at += adjust; ANHE_at_cache (*he); } } /* fetch new monotonic and realtime times from the kernel */ /* also detect if there was a timejump, and act accordingly */ inline_speed void time_update (EV_P_ ev_tstamp max_block) { #if EV_USE_MONOTONIC if (expect_true (have_monotonic)) { int i; ev_tstamp odiff = rtmn_diff; mn_now = get_clock (); /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ /* interpolate in the meantime */ if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5)) { ev_rt_now = rtmn_diff + mn_now; return; } now_floor = mn_now; ev_rt_now = ev_time (); /* loop a few times, before making important decisions. * on the choice of "4": one iteration isn't enough, * in case we get preempted during the calls to * ev_time and get_clock. a second call is almost guaranteed * to succeed in that case, though. and looping a few more times * doesn't hurt either as we only do this on time-jumps or * in the unlikely event of having been preempted here. */ for (i = 4; --i; ) { ev_tstamp diff; rtmn_diff = ev_rt_now - mn_now; diff = odiff - rtmn_diff; if (expect_true ((diff < 0. ? 
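/* --------------------------------------------------------------------------
 * Illustrative note, not part of libev and guarded out by #if 0: the
 * time_update() machinery here maintains ev_rt_now == rtmn_diff + mn_now
 * and re-measures rtmn_diff only when the interpolated value drifts past
 * MIN_TIMEJUMP. Seen from the public API:
 * ------------------------------------------------------------------------ */
#if 0
ev_tstamp cached = ev_now (EV_A); /* loop-iteration timestamp, no syscall */
ev_tstamp fresh  = ev_time ();    /* queries the realtime clock right now */

ev_now_update (EV_A);             /* force the cached value up to date */
#endif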
-diff : diff) < MIN_TIMEJUMP)) return; /* all is well */ ev_rt_now = ev_time (); mn_now = get_clock (); now_floor = mn_now; } /* no timer adjustment, as the monotonic clock doesn't jump */ /* timers_reschedule (EV_A_ rtmn_diff - odiff) */ # if EV_PERIODIC_ENABLE periodics_reschedule (EV_A); # endif } else #endif { ev_rt_now = ev_time (); if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP)) { /* adjust timers. this is easy, as the offset is the same for all of them */ timers_reschedule (EV_A_ ev_rt_now - mn_now); #if EV_PERIODIC_ENABLE periodics_reschedule (EV_A); #endif } mn_now = ev_rt_now; } } /* ########## COOLIO PATCHERY HO! ########## */ #if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL) struct ev_poll_args { struct ev_loop *loop; ev_tstamp waittime; }; static VALUE ev_backend_poll(void *ptr) { struct ev_poll_args *args = (struct ev_poll_args *)ptr; struct ev_loop *loop = args->loop; backend_poll (EV_A_ args->waittime); return Qnil; } #endif /* ######################################## */ int ev_run (EV_P_ int flags) { /* ########## COOLIO PATCHERY HO! ########## */ #if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL) struct ev_poll_args poll_args; #endif /* ######################################## */ #if EV_FEATURE_API ++loop_depth; #endif assert (("libev: ev_loop recursion during release detected", loop_done != EVBREAK_RECURSE)); loop_done = EVBREAK_CANCEL; EV_INVOKE_PENDING; /* in case we recurse, ensure ordering stays nice and clean */ do { #if EV_VERIFY >= 2 ev_verify (EV_A); #endif #ifndef _WIN32 if (expect_false (curpid)) /* penalise the forking check even more */ if (expect_false (getpid () != curpid)) { curpid = getpid (); postfork = 1; } #endif #if EV_FORK_ENABLE /* we might have forked, so queue fork handlers */ if (expect_false (postfork)) if (forkcnt) { queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); EV_INVOKE_PENDING; } #endif #if EV_PREPARE_ENABLE /* queue prepare watchers (and execute them) */ if (expect_false (preparecnt)) { queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); EV_INVOKE_PENDING; } #endif if (expect_false (loop_done)) break; /* we might have forked, so reify kernel state if necessary */ if (expect_false (postfork)) loop_fork (EV_A); /* update fd-related kernel structures */ fd_reify (EV_A); /* calculate blocking time */ { ev_tstamp waittime = 0.; ev_tstamp sleeptime = 0.; /* remember old timestamp for io_blocktime calculation */ ev_tstamp prev_mn_now = mn_now; /* update time to cancel out callback processing overhead */ time_update (EV_A_ 1e100); /* from now on, we want a pipe-wake-up */ pipe_write_wanted = 1; ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) { waittime = MAX_BLOCKTIME; if (timercnt) { ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; if (waittime > to) waittime = to; } #if EV_PERIODIC_ENABLE if (periodiccnt) { ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now; if (waittime > to) waittime = to; } #endif /* don't let timeouts decrease the waittime below timeout_blocktime */ if (expect_false (waittime < timeout_blocktime)) waittime = timeout_blocktime; /* at this point, we NEED to wait, so we have to ensure */ /* to pass a minimum nonzero value to the backend */ if (expect_false (waittime < backend_mintime)) waittime = backend_mintime; /* extra check because io_blocktime is commonly 
0 */
          if (expect_false (io_blocktime))
            {
              sleeptime = io_blocktime - (mn_now - prev_mn_now);

              if (sleeptime > waittime - backend_mintime)
                sleeptime = waittime - backend_mintime;

              if (expect_true (sleeptime > 0.))
                {
                  ev_sleep (sleeptime);
                  waittime -= sleeptime;
                }
            }
        }

#if EV_FEATURE_API
      ++loop_count;
#endif
      assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */

      /*
      ########################## COOLIO PATCHERY HO! ##########################

      According to the grandwizards of Ruby, locking and unlocking of the
      global interpreter lock are apparently too powerful a concept for a mere
      mortal to wield (although redefining what + and - do to numbers is
      totally cool). And so it came to pass that the only acceptable way to
      release the global interpreter lock is through a convoluted callback
      system that takes a function pointer. While the grandwizard of libev
      foresaw this sort of scenario, he too built his API around callbacks:
      one that runs before the system call, and one that runs immediately
      after.

      And so it came to pass that trying to wrap everything up in callbacks
      created two incompatible APIs: Ruby's, which releases the global
      interpreter lock and reacquires it when the callback returns, and
      libev's, which wants two callbacks, one which runs before the polling
      operation starts, and one which runs after it finishes.

      These two systems are incompatible as they both want to use callbacks
      to solve the same problem; libev wants before/after callbacks, while
      Ruby wants an "around" callback. This presents a significant problem:
      the two patterns are diametrically opposed and cannot be composed.

      And thus we are left with no choice but to patch the internals of libev
      in order to release a mutex at just the precise moment.

      This is a great example of a situation where granular locking and
      unlocking of the GVL is practically required. The goal is to get as
      close to the system call as possible, and to keep the GVL unlocked for
      the shortest amount of time possible.

      Perhaps Ruby could benefit from such an API, e.g.:

      rb_thread_unsafe_dangerous_crazy_blocking_region_begin(...);
      rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);

      #######################################################################
      */

      /* simulate rb_thread_call_without_gvl using rb_thread_blocking_region.
https://github.com/brianmario/mysql2/blob/master/ext/mysql2/client.h#L8 */ #ifndef HAVE_RB_THREAD_CALL_WITHOUT_GVL #ifdef HAVE_RB_THREAD_BLOCKING_REGION #define rb_thread_call_without_gvl(func, data1, ubf, data2) \ rb_thread_blocking_region((rb_blocking_function_t *)func, data1, ubf, data2) #endif #endif #if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL) poll_args.loop = loop; poll_args.waittime = waittime; rb_thread_call_without_gvl((void *)ev_backend_poll, (void *)&poll_args, RUBY_UBF_IO, 0); #else backend_poll (EV_A_ waittime); #endif /* ############################# END PATCHERY ############################ */ assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ pipe_write_wanted = 0; /* just an optimisation, no fence needed */ ECB_MEMORY_FENCE_ACQUIRE; if (pipe_write_skipped) { assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); } /* update ev_rt_now, do magic */ time_update (EV_A_ waittime + sleeptime); } /* queue pending timers and reschedule them */ timers_reify (EV_A); /* relative timers called last */ #if EV_PERIODIC_ENABLE periodics_reify (EV_A); /* absolute timers called first */ #endif #if EV_IDLE_ENABLE /* queue idle watchers unless other events are pending */ idle_reify (EV_A); #endif #if EV_CHECK_ENABLE /* queue check watchers, to be executed first */ if (expect_false (checkcnt)) queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); #endif EV_INVOKE_PENDING; } while (expect_true ( activecnt && !loop_done && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT)) )); if (loop_done == EVBREAK_ONE) loop_done = EVBREAK_CANCEL; #if EV_FEATURE_API --loop_depth; #endif return activecnt; } void ev_break (EV_P_ int how) EV_THROW { loop_done = how; } void ev_ref (EV_P) EV_THROW { ++activecnt; } void ev_unref (EV_P) EV_THROW { --activecnt; } void ev_now_update (EV_P) EV_THROW { time_update (EV_A_ 1e100); } void ev_suspend (EV_P) EV_THROW { ev_now_update (EV_A); } void ev_resume (EV_P) EV_THROW { ev_tstamp mn_prev = mn_now; ev_now_update (EV_A); timers_reschedule (EV_A_ mn_now - mn_prev); #if EV_PERIODIC_ENABLE /* TODO: really do this? */ periodics_reschedule (EV_A); #endif } /*****************************************************************************/ /* singly-linked list management, used when the expected list length is short */ inline_size void wlist_add (WL *head, WL elem) { elem->next = *head; *head = elem; } inline_size void wlist_del (WL *head, WL elem) { while (*head) { if (expect_true (*head == elem)) { *head = elem->next; break; } head = &(*head)->next; } } /* internal, faster, version of ev_clear_pending */ inline_speed void clear_pending (EV_P_ W w) { if (w->pending) { pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w; w->pending = 0; } } int ev_clear_pending (EV_P_ void *w) EV_THROW { W w_ = (W)w; int pending = w_->pending; if (expect_true (pending)) { ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1; p->w = (W)&pending_w; w_->pending = 0; return p->events; } else return 0; } inline_size void pri_adjust (EV_P_ W w) { int pri = ev_priority (w); pri = pri < EV_MINPRI ? EV_MINPRI : pri; pri = pri > EV_MAXPRI ? 
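/* --------------------------------------------------------------------------
 * Illustrative sketch, not part of cool.io and guarded out by #if 0: the
 * same release-the-GVL pattern used around backend_poll() above, applied
 * to an arbitrary blocking call. Nothing inside the wrapper may touch Ruby
 * objects while the GVL is released. The my_* names are hypothetical.
 * ------------------------------------------------------------------------ */
#if 0
struct my_read_args { int fd; void *buf; size_t len; ssize_t result; };

static void *
my_blocking_read (void *ptr)
{
  struct my_read_args *a = (struct my_read_args *)ptr;
  a->result = read (a->fd, a->buf, a->len); /* may block; GVL is not held */
  return NULL;
}

/* struct my_read_args args = { fd, buf, buflen, 0 };
 * rb_thread_call_without_gvl (my_blocking_read, &args, RUBY_UBF_IO, 0); */
#endif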
EV_MAXPRI : pri; ev_set_priority (w, pri); } inline_speed void ev_start (EV_P_ W w, int active) { pri_adjust (EV_A_ w); w->active = active; ev_ref (EV_A); } inline_size void ev_stop (EV_P_ W w) { ev_unref (EV_A); w->active = 0; } /*****************************************************************************/ noinline void ev_io_start (EV_P_ ev_io *w) EV_THROW { int fd = w->fd; if (expect_false (ev_is_active (w))) return; assert (("libev: ev_io_start called with negative fd", fd >= 0)); assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE)))); EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, 1); array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero); wlist_add (&anfds[fd].head, (WL)w); /* common bug, apparently */ assert (("libev: ev_io_start called with corrupted watcher", ((WL)w)->next != (WL)w)); fd_change (EV_A_ fd, w->events & EV__IOFDSET | EV_ANFD_REIFY); w->events &= ~EV__IOFDSET; EV_FREQUENT_CHECK; } noinline void ev_io_stop (EV_P_ ev_io *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); EV_FREQUENT_CHECK; wlist_del (&anfds[w->fd].head, (WL)w); ev_stop (EV_A_ (W)w); fd_change (EV_A_ w->fd, EV_ANFD_REIFY); EV_FREQUENT_CHECK; } noinline void ev_timer_start (EV_P_ ev_timer *w) EV_THROW { if (expect_false (ev_is_active (w))) return; ev_at (w) += mn_now; assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); EV_FREQUENT_CHECK; ++timercnt; ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1); array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2); ANHE_w (timers [ev_active (w)]) = (WT)w; ANHE_at_cache (timers [ev_active (w)]); upheap (timers, ev_active (w)); EV_FREQUENT_CHECK; /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/ } noinline void ev_timer_stop (EV_P_ ev_timer *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w)); --timercnt; if (expect_true (active < timercnt + HEAP0)) { timers [active] = timers [timercnt + HEAP0]; adjustheap (timers, timercnt, active); } } ev_at (w) -= mn_now; ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } noinline void ev_timer_again (EV_P_ ev_timer *w) EV_THROW { EV_FREQUENT_CHECK; clear_pending (EV_A_ (W)w); if (ev_is_active (w)) { if (w->repeat) { ev_at (w) = mn_now + w->repeat; ANHE_at_cache (timers [ev_active (w)]); adjustheap (timers, timercnt, ev_active (w)); } else ev_timer_stop (EV_A_ w); } else if (w->repeat) { ev_at (w) = w->repeat; ev_timer_start (EV_A_ w); } EV_FREQUENT_CHECK; } ev_tstamp ev_timer_remaining (EV_P_ ev_timer *w) EV_THROW { return ev_at (w) - (ev_is_active (w) ? 
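/* --------------------------------------------------------------------------
 * Illustrative sketch, not part of libev and guarded out by #if 0: minimal
 * use of the ev_io API implemented above. The fd must stay constant while
 * the watcher is active (enforced by the assertion in ev_io_stop).
 * my_read_cb is hypothetical.
 * ------------------------------------------------------------------------ */
#if 0
static void
my_read_cb (EV_P_ ev_io *w, int revents)
{
  char buf [1024];
  ssize_t n = read (w->fd, buf, sizeof (buf));

  if (n <= 0)
    ev_io_stop (EV_A_ w); /* EOF or error: stop watching this fd */
}

/* ev_io iow;
 * ev_io_init (&iow, my_read_cb, fd, EV_READ);
 * ev_io_start (EV_A_ &iow); */
#endif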
mn_now : 0.); } #if EV_PERIODIC_ENABLE noinline void ev_periodic_start (EV_P_ ev_periodic *w) EV_THROW { if (expect_false (ev_is_active (w))) return; if (w->reschedule_cb) ev_at (w) = w->reschedule_cb (w, ev_rt_now); else if (w->interval) { assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.)); periodic_recalc (EV_A_ w); } else ev_at (w) = w->offset; EV_FREQUENT_CHECK; ++periodiccnt; ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1); array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2); ANHE_w (periodics [ev_active (w)]) = (WT)w; ANHE_at_cache (periodics [ev_active (w)]); upheap (periodics, ev_active (w)); EV_FREQUENT_CHECK; /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/ } noinline void ev_periodic_stop (EV_P_ ev_periodic *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w)); --periodiccnt; if (expect_true (active < periodiccnt + HEAP0)) { periodics [active] = periodics [periodiccnt + HEAP0]; adjustheap (periodics, periodiccnt, active); } } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } noinline void ev_periodic_again (EV_P_ ev_periodic *w) EV_THROW { /* TODO: use adjustheap and recalculation */ ev_periodic_stop (EV_A_ w); ev_periodic_start (EV_A_ w); } #endif #ifndef SA_RESTART # define SA_RESTART 0 #endif #if EV_SIGNAL_ENABLE noinline void ev_signal_start (EV_P_ ev_signal *w) EV_THROW { if (expect_false (ev_is_active (w))) return; assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG)); #if EV_MULTIPLICITY assert (("libev: a signal must not be attached to two different loops", !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop)); signals [w->signum - 1].loop = EV_A; ECB_MEMORY_FENCE_RELEASE; #endif EV_FREQUENT_CHECK; #if EV_USE_SIGNALFD if (sigfd == -2) { sigfd = signalfd (-1, &sigfd_set, SFD_NONBLOCK | SFD_CLOEXEC); if (sigfd < 0 && errno == EINVAL) sigfd = signalfd (-1, &sigfd_set, 0); /* retry without flags */ if (sigfd >= 0) { fd_intern (sigfd); /* doing it twice will not hurt */ sigemptyset (&sigfd_set); ev_io_init (&sigfd_w, sigfdcb, sigfd, EV_READ); ev_set_priority (&sigfd_w, EV_MAXPRI); ev_io_start (EV_A_ &sigfd_w); ev_unref (EV_A); /* signalfd watcher should not keep loop alive */ } } if (sigfd >= 0) { /* TODO: check .head */ sigaddset (&sigfd_set, w->signum); sigprocmask (SIG_BLOCK, &sigfd_set, 0); signalfd (sigfd, &sigfd_set, 0); } #endif ev_start (EV_A_ (W)w, 1); wlist_add (&signals [w->signum - 1].head, (WL)w); if (!((WL)w)->next) # if EV_USE_SIGNALFD if (sigfd < 0) /*TODO*/ # endif { # ifdef _WIN32 evpipe_init (EV_A); signal (w->signum, ev_sighandler); # else struct sigaction sa; evpipe_init (EV_A); sa.sa_handler = ev_sighandler; sigfillset (&sa.sa_mask); sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */ sigaction (w->signum, &sa, 0); if (origflags & EVFLAG_NOSIGMASK) { sigemptyset (&sa.sa_mask); sigaddset (&sa.sa_mask, w->signum); sigprocmask (SIG_UNBLOCK, &sa.sa_mask, 0); } #endif } EV_FREQUENT_CHECK; } noinline void ev_signal_stop (EV_P_ ev_signal *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; wlist_del (&signals [w->signum - 1].head, (WL)w); ev_stop (EV_A_ (W)w); if (!signals [w->signum - 1].head) { #if EV_MULTIPLICITY signals [w->signum 
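/* --------------------------------------------------------------------------
 * Illustrative sketch, not part of libev and guarded out by #if 0:
 * ev_signal_start() above installs a handler (or signalfd entry) only for
 * the first watcher on a given signum; later watchers simply join the
 * signals[] list. my_sigint_cb is hypothetical.
 * ------------------------------------------------------------------------ */
#if 0
static void
my_sigint_cb (EV_P_ ev_signal *w, int revents)
{
  ev_break (EV_A_ EVBREAK_ALL); /* leave all nested ev_run calls */
}

/* ev_signal sw;
 * ev_signal_init (&sw, my_sigint_cb, SIGINT);
 * ev_signal_start (EV_A_ &sw); */
#endif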
- 1].loop = 0; /* unattach from signal */ #endif #if EV_USE_SIGNALFD if (sigfd >= 0) { sigset_t ss; sigemptyset (&ss); sigaddset (&ss, w->signum); sigdelset (&sigfd_set, w->signum); signalfd (sigfd, &sigfd_set, 0); sigprocmask (SIG_UNBLOCK, &ss, 0); } else #endif signal (w->signum, SIG_DFL); } EV_FREQUENT_CHECK; } #endif #if EV_CHILD_ENABLE void ev_child_start (EV_P_ ev_child *w) EV_THROW { #if EV_MULTIPLICITY assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr)); #endif if (expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, 1); wlist_add (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w); EV_FREQUENT_CHECK; } void ev_child_stop (EV_P_ ev_child *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w); ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_STAT_ENABLE # ifdef _WIN32 # undef lstat # define lstat(a,b) _stati64 (a,b) # endif #define DEF_STAT_INTERVAL 5.0074891 #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */ #define MIN_STAT_INTERVAL 0.1074891 noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents); #if EV_USE_INOTIFY /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */ # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX) noinline static void infy_add (EV_P_ ev_stat *w) { w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_DONT_FOLLOW | IN_MASK_ADD); if (w->wd >= 0) { struct statfs sfs; /* now local changes will be tracked by inotify, but remote changes won't */ /* unless the filesystem is known to be local, we therefore still poll */ /* also do poll on <2.6.25, but with normal frequency */ if (!fs_2625) w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL; else if (!statfs (w->path, &sfs) && (sfs.f_type == 0x1373 /* devfs */ || sfs.f_type == 0x4006 /* fat */ || sfs.f_type == 0x4d44 /* msdos */ || sfs.f_type == 0xEF53 /* ext2/3 */ || sfs.f_type == 0x72b6 /* jffs2 */ || sfs.f_type == 0x858458f6 /* ramfs */ || sfs.f_type == 0x5346544e /* ntfs */ || sfs.f_type == 0x3153464a /* jfs */ || sfs.f_type == 0x9123683e /* btrfs */ || sfs.f_type == 0x52654973 /* reiser3 */ || sfs.f_type == 0x01021994 /* tmpfs */ || sfs.f_type == 0x58465342 /* xfs */)) w->timer.repeat = 0.; /* filesystem is local, kernel new enough */ else w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */ } else { /* can't use inotify, continue to stat */ w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL; /* if path is not there, monitor some parent directory for speedup hints */ /* note that exceeding the hardcoded path limit is not a correctness issue, */ /* but an efficiency issue only */ if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096) { char path [4096]; strcpy (path, w->path); do { int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF | (errno == EACCES ? 
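/* --------------------------------------------------------------------------
 * Illustrative sketch, not part of libev and guarded out by #if 0: the stat
 * machinery around this point prefers inotify and falls back to polling
 * roughly every DEF_STAT_INTERVAL (~5s), or NFS_STAT_INTERVAL (~30s) for
 * filesystems where inotify may miss remote changes. my_stat_cb is
 * hypothetical.
 * ------------------------------------------------------------------------ */
#if 0
static void
my_stat_cb (EV_P_ ev_stat *w, int revents)
{
  if (w->attr.st_nlink)  /* st_nlink == 0 signals "path does not exist" */
    printf ("%s changed\n", w->path);
  else
    printf ("%s disappeared\n", w->path);
}

/* ev_stat sw;
 * ev_stat_init (&sw, my_stat_cb, "/etc/passwd", 0.);  0. = default interval
 * ev_stat_start (EV_A_ &sw); */
#endif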
IN_ATTRIB : IN_CREATE | IN_MOVED_TO); char *pend = strrchr (path, '/'); if (!pend || pend == path) break; *pend = 0; w->wd = inotify_add_watch (fs_fd, path, mask); } while (w->wd < 0 && (errno == ENOENT || errno == EACCES)); } } if (w->wd >= 0) wlist_add (&fs_hash [w->wd & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w); /* now re-arm timer, if required */ if (ev_is_active (&w->timer)) ev_ref (EV_A); ev_timer_again (EV_A_ &w->timer); if (ev_is_active (&w->timer)) ev_unref (EV_A); } noinline static void infy_del (EV_P_ ev_stat *w) { int slot; int wd = w->wd; if (wd < 0) return; w->wd = -2; slot = wd & ((EV_INOTIFY_HASHSIZE) - 1); wlist_del (&fs_hash [slot].head, (WL)w); /* remove this watcher, if others are watching it, they will rearm */ inotify_rm_watch (fs_fd, wd); } noinline static void infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev) { if (slot < 0) /* overflow, need to check for all hash slots */ for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot) infy_wd (EV_A_ slot, wd, ev); else { WL w_; for (w_ = fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head; w_; ) { ev_stat *w = (ev_stat *)w_; w_ = w_->next; /* lets us remove this watcher and all before it */ if (w->wd == wd || wd == -1) { if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF)) { wlist_del (&fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w); w->wd = -1; infy_add (EV_A_ w); /* re-add, no matter what */ } stat_timer_cb (EV_A_ &w->timer, 0); } } } } static void infy_cb (EV_P_ ev_io *w, int revents) { char buf [EV_INOTIFY_BUFSIZE]; int ofs; int len = read (fs_fd, buf, sizeof (buf)); for (ofs = 0; ofs < len; ) { struct inotify_event *ev = (struct inotify_event *)(buf + ofs); infy_wd (EV_A_ ev->wd, ev->wd, ev); ofs += sizeof (struct inotify_event) + ev->len; } } inline_size ecb_cold void ev_check_2625 (EV_P) { /* kernels < 2.6.25 are borked * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html */ if (ev_linux_version () < 0x020619) return; fs_2625 = 1; } inline_size int infy_newfd (void) { #if defined IN_CLOEXEC && defined IN_NONBLOCK int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK); if (fd >= 0) return fd; #endif return inotify_init (); } inline_size void infy_init (EV_P) { if (fs_fd != -2) return; fs_fd = -1; ev_check_2625 (EV_A); fs_fd = infy_newfd (); if (fs_fd >= 0) { fd_intern (fs_fd); ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ); ev_set_priority (&fs_w, EV_MAXPRI); ev_io_start (EV_A_ &fs_w); ev_unref (EV_A); } } inline_size void infy_fork (EV_P) { int slot; if (fs_fd < 0) return; ev_ref (EV_A); ev_io_stop (EV_A_ &fs_w); close (fs_fd); fs_fd = infy_newfd (); if (fs_fd >= 0) { fd_intern (fs_fd); ev_io_set (&fs_w, fs_fd, EV_READ); ev_io_start (EV_A_ &fs_w); ev_unref (EV_A); } for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot) { WL w_ = fs_hash [slot].head; fs_hash [slot].head = 0; while (w_) { ev_stat *w = (ev_stat *)w_; w_ = w_->next; /* lets us add this watcher */ w->wd = -1; if (fs_fd >= 0) infy_add (EV_A_ w); /* re-add, no matter what */ else { w->timer.repeat = w->interval ? 
w->interval : DEF_STAT_INTERVAL; if (ev_is_active (&w->timer)) ev_ref (EV_A); ev_timer_again (EV_A_ &w->timer); if (ev_is_active (&w->timer)) ev_unref (EV_A); } } } } #endif #ifdef _WIN32 # define EV_LSTAT(p,b) _stati64 (p, b) #else # define EV_LSTAT(p,b) lstat (p, b) #endif void ev_stat_stat (EV_P_ ev_stat *w) EV_THROW { if (lstat (w->path, &w->attr) < 0) w->attr.st_nlink = 0; else if (!w->attr.st_nlink) w->attr.st_nlink = 1; } noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents) { ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); ev_statdata prev = w->attr; ev_stat_stat (EV_A_ w); /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */ if ( prev.st_dev != w->attr.st_dev || prev.st_ino != w->attr.st_ino || prev.st_mode != w->attr.st_mode || prev.st_nlink != w->attr.st_nlink || prev.st_uid != w->attr.st_uid || prev.st_gid != w->attr.st_gid || prev.st_rdev != w->attr.st_rdev || prev.st_size != w->attr.st_size || prev.st_atime != w->attr.st_atime || prev.st_mtime != w->attr.st_mtime || prev.st_ctime != w->attr.st_ctime ) { /* we only update w->prev on actual differences */ /* in case we test more often than invoke the callback, */ /* to ensure that prev is always different to attr */ w->prev = prev; #if EV_USE_INOTIFY if (fs_fd >= 0) { infy_del (EV_A_ w); infy_add (EV_A_ w); ev_stat_stat (EV_A_ w); /* avoid race... */ } #endif ev_feed_event (EV_A_ w, EV_STAT); } } void ev_stat_start (EV_P_ ev_stat *w) EV_THROW { if (expect_false (ev_is_active (w))) return; ev_stat_stat (EV_A_ w); if (w->interval < MIN_STAT_INTERVAL && w->interval) w->interval = MIN_STAT_INTERVAL; ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL); ev_set_priority (&w->timer, ev_priority (w)); #if EV_USE_INOTIFY infy_init (EV_A); if (fs_fd >= 0) infy_add (EV_A_ w); else #endif { ev_timer_again (EV_A_ &w->timer); ev_unref (EV_A); } ev_start (EV_A_ (W)w, 1); EV_FREQUENT_CHECK; } void ev_stat_stop (EV_P_ ev_stat *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; #if EV_USE_INOTIFY infy_del (EV_A_ w); #endif if (ev_is_active (&w->timer)) { ev_ref (EV_A); ev_timer_stop (EV_A_ &w->timer); } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_IDLE_ENABLE void ev_idle_start (EV_P_ ev_idle *w) EV_THROW { if (expect_false (ev_is_active (w))) return; pri_adjust (EV_A_ (W)w); EV_FREQUENT_CHECK; { int active = ++idlecnt [ABSPRI (w)]; ++idleall; ev_start (EV_A_ (W)w, active); array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2); idles [ABSPRI (w)][active - 1] = w; } EV_FREQUENT_CHECK; } void ev_idle_stop (EV_P_ ev_idle *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]]; ev_active (idles [ABSPRI (w)][active - 1]) = active; ev_stop (EV_A_ (W)w); --idleall; } EV_FREQUENT_CHECK; } #endif #if EV_PREPARE_ENABLE void ev_prepare_start (EV_P_ ev_prepare *w) EV_THROW { if (expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++preparecnt); array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2); prepares [preparecnt - 1] = w; EV_FREQUENT_CHECK; } void ev_prepare_stop (EV_P_ ev_prepare *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); prepares [active - 1] = prepares 
[--preparecnt]; ev_active (prepares [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_CHECK_ENABLE void ev_check_start (EV_P_ ev_check *w) EV_THROW { if (expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++checkcnt); array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2); checks [checkcnt - 1] = w; EV_FREQUENT_CHECK; } void ev_check_stop (EV_P_ ev_check *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); checks [active - 1] = checks [--checkcnt]; ev_active (checks [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_EMBED_ENABLE noinline void ev_embed_sweep (EV_P_ ev_embed *w) EV_THROW { ev_run (w->other, EVRUN_NOWAIT); } static void embed_io_cb (EV_P_ ev_io *io, int revents) { ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io)); if (ev_cb (w)) ev_feed_event (EV_A_ (W)w, EV_EMBED); else ev_run (w->other, EVRUN_NOWAIT); } static void embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents) { ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare)); { EV_P = w->other; while (fdchangecnt) { fd_reify (EV_A); ev_run (EV_A_ EVRUN_NOWAIT); } } } static void embed_fork_cb (EV_P_ ev_fork *fork_w, int revents) { ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork)); ev_embed_stop (EV_A_ w); { EV_P = w->other; ev_loop_fork (EV_A); ev_run (EV_A_ EVRUN_NOWAIT); } ev_embed_start (EV_A_ w); } #if 0 static void embed_idle_cb (EV_P_ ev_idle *idle, int revents) { ev_idle_stop (EV_A_ idle); } #endif void ev_embed_start (EV_P_ ev_embed *w) EV_THROW { if (expect_false (ev_is_active (w))) return; { EV_P = w->other; assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ())); ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ); } EV_FREQUENT_CHECK; ev_set_priority (&w->io, ev_priority (w)); ev_io_start (EV_A_ &w->io); ev_prepare_init (&w->prepare, embed_prepare_cb); ev_set_priority (&w->prepare, EV_MINPRI); ev_prepare_start (EV_A_ &w->prepare); ev_fork_init (&w->fork, embed_fork_cb); ev_fork_start (EV_A_ &w->fork); /*ev_idle_init (&w->idle, e,bed_idle_cb);*/ ev_start (EV_A_ (W)w, 1); EV_FREQUENT_CHECK; } void ev_embed_stop (EV_P_ ev_embed *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_io_stop (EV_A_ &w->io); ev_prepare_stop (EV_A_ &w->prepare); ev_fork_stop (EV_A_ &w->fork); ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_FORK_ENABLE void ev_fork_start (EV_P_ ev_fork *w) EV_THROW { if (expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++forkcnt); array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2); forks [forkcnt - 1] = w; EV_FREQUENT_CHECK; } void ev_fork_stop (EV_P_ ev_fork *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); forks [active - 1] = forks [--forkcnt]; ev_active (forks [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_CLEANUP_ENABLE void ev_cleanup_start (EV_P_ ev_cleanup *w) EV_THROW { if (expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++cleanupcnt); array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, EMPTY2); cleanups [cleanupcnt - 1] = w; /* cleanup watchers should never keep a refcount on the loop */ ev_unref (EV_A); EV_FREQUENT_CHECK; } void 
ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_ref (EV_A); { int active = ev_active (w); cleanups [active - 1] = cleanups [--cleanupcnt]; ev_active (cleanups [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_ASYNC_ENABLE void ev_async_start (EV_P_ ev_async *w) EV_THROW { if (expect_false (ev_is_active (w))) return; w->sent = 0; evpipe_init (EV_A); EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++asynccnt); array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2); asyncs [asynccnt - 1] = w; EV_FREQUENT_CHECK; } void ev_async_stop (EV_P_ ev_async *w) EV_THROW { clear_pending (EV_A_ (W)w); if (expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); asyncs [active - 1] = asyncs [--asynccnt]; ev_active (asyncs [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } void ev_async_send (EV_P_ ev_async *w) EV_THROW { w->sent = 1; evpipe_write (EV_A_ &async_pending); } #endif /*****************************************************************************/ struct ev_once { ev_io io; ev_timer to; void (*cb)(int revents, void *arg); void *arg; }; static void once_cb (EV_P_ struct ev_once *once, int revents) { void (*cb)(int revents, void *arg) = once->cb; void *arg = once->arg; ev_io_stop (EV_A_ &once->io); ev_timer_stop (EV_A_ &once->to); ev_free (once); cb (revents, arg); } static void once_cb_io (EV_P_ ev_io *w, int revents) { struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io)); once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->to)); } static void once_cb_to (EV_P_ ev_timer *w, int revents) { struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to)); once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->io)); } void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_THROW { struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once)); if (expect_false (!once)) { cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMER, arg); return; } once->cb = cb; once->arg = arg; ev_init (&once->io, once_cb_io); if (fd >= 0) { ev_io_set (&once->io, fd, events); ev_io_start (EV_A_ &once->io); } ev_init (&once->to, once_cb_to); if (timeout >= 0.) 
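/* a negative timeout means "no timeout": the ev_timer is left inactive
   and the once watcher then fires on fd activity alone */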
{ ev_timer_set (&once->to, timeout, 0.); ev_timer_start (EV_A_ &once->to); } } /*****************************************************************************/ #if EV_WALK_ENABLE ecb_cold void ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_THROW { int i, j; ev_watcher_list *wl, *wn; if (types & (EV_IO | EV_EMBED)) for (i = 0; i < anfdmax; ++i) for (wl = anfds [i].head; wl; ) { wn = wl->next; #if EV_EMBED_ENABLE if (ev_cb ((ev_io *)wl) == embed_io_cb) { if (types & EV_EMBED) cb (EV_A_ EV_EMBED, ((char *)wl) - offsetof (struct ev_embed, io)); } else #endif #if EV_USE_INOTIFY if (ev_cb ((ev_io *)wl) == infy_cb) ; else #endif if ((ev_io *)wl != &pipe_w) if (types & EV_IO) cb (EV_A_ EV_IO, wl); wl = wn; } if (types & (EV_TIMER | EV_STAT)) for (i = timercnt + HEAP0; i-- > HEAP0; ) #if EV_STAT_ENABLE /*TODO: timer is not always active*/ if (ev_cb ((ev_timer *)ANHE_w (timers [i])) == stat_timer_cb) { if (types & EV_STAT) cb (EV_A_ EV_STAT, ((char *)ANHE_w (timers [i])) - offsetof (struct ev_stat, timer)); } else #endif if (types & EV_TIMER) cb (EV_A_ EV_TIMER, ANHE_w (timers [i])); #if EV_PERIODIC_ENABLE if (types & EV_PERIODIC) for (i = periodiccnt + HEAP0; i-- > HEAP0; ) cb (EV_A_ EV_PERIODIC, ANHE_w (periodics [i])); #endif #if EV_IDLE_ENABLE if (types & EV_IDLE) for (j = NUMPRI; j--; ) for (i = idlecnt [j]; i--; ) cb (EV_A_ EV_IDLE, idles [j][i]); #endif #if EV_FORK_ENABLE if (types & EV_FORK) for (i = forkcnt; i--; ) if (ev_cb (forks [i]) != embed_fork_cb) cb (EV_A_ EV_FORK, forks [i]); #endif #if EV_ASYNC_ENABLE if (types & EV_ASYNC) for (i = asynccnt; i--; ) cb (EV_A_ EV_ASYNC, asyncs [i]); #endif #if EV_PREPARE_ENABLE if (types & EV_PREPARE) for (i = preparecnt; i--; ) # if EV_EMBED_ENABLE if (ev_cb (prepares [i]) != embed_prepare_cb) # endif cb (EV_A_ EV_PREPARE, prepares [i]); #endif #if EV_CHECK_ENABLE if (types & EV_CHECK) for (i = checkcnt; i--; ) cb (EV_A_ EV_CHECK, checks [i]); #endif #if EV_SIGNAL_ENABLE if (types & EV_SIGNAL) for (i = 0; i < EV_NSIG - 1; ++i) for (wl = signals [i].head; wl; ) { wn = wl->next; cb (EV_A_ EV_SIGNAL, wl); wl = wn; } #endif #if EV_CHILD_ENABLE if (types & EV_CHILD) for (i = (EV_PID_HASHSIZE); i--; ) for (wl = childs [i]; wl; ) { wn = wl->next; cb (EV_A_ EV_CHILD, wl); wl = wn; } #endif /* EV_STAT 0x00001000 /* stat data changed */ /* EV_EMBED 0x00010000 /* embedded event loop needs sweep */ } #endif #if EV_MULTIPLICITY #include "ev_wrap.h" #endif cool.io-1.8.1/ext/libev/ev_select.c0000644000004100000410000002416714632135713017150 0ustar www-datawww-data/* * libev select fd activity backend * * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifndef _WIN32 /* for unix systems */ # include # ifndef __hpux /* for REAL unix systems */ # include # endif #endif #ifndef EV_SELECT_USE_FD_SET # ifdef NFDBITS # define EV_SELECT_USE_FD_SET 0 # else # define EV_SELECT_USE_FD_SET 1 # endif #endif #if EV_SELECT_IS_WINSOCKET # undef EV_SELECT_USE_FD_SET # define EV_SELECT_USE_FD_SET 1 # undef NFDBITS # define NFDBITS 0 #endif #if !EV_SELECT_USE_FD_SET # define NFDBYTES (NFDBITS / 8) #endif #include #ifdef _WIN32 /* ########## COOLIO PATCHERY HO! ########## Ruby undefs FD_* utilities for own implementation. It converts fd argument into socket handle internally on Windows, so libev should not use Ruby's FD_* utilities. Following FD_* utilities come from MinGW. RubyInstaller is built by MinGW so this should work. */ int PASCAL __WSAFDIsSet(SOCKET,fd_set*); #define EV_WIN_FD_CLR(fd,set) do { u_int __i;\ for (__i = 0; __i < ((fd_set *)(set))->fd_count ; __i++) {\ if (((fd_set *)(set))->fd_array[__i] == (fd)) {\ while (__i < ((fd_set *)(set))->fd_count-1) {\ ((fd_set*)(set))->fd_array[__i] = ((fd_set*)(set))->fd_array[__i+1];\ __i++;\ }\ ((fd_set*)(set))->fd_count--;\ break;\ }\ }\ } while (0) #define EV_WIN_FD_SET(fd, set) do { u_int __i;\ for (__i = 0; __i < ((fd_set *)(set))->fd_count ; __i++) {\ if (((fd_set *)(set))->fd_array[__i] == (fd)) {\ break;\ }\ }\ if (__i == ((fd_set *)(set))->fd_count) {\ if (((fd_set *)(set))->fd_count < FD_SETSIZE) {\ ((fd_set *)(set))->fd_array[__i] = (fd);\ ((fd_set *)(set))->fd_count++;\ }\ }\ } while(0) #define EV_WIN_FD_ZERO(set) (((fd_set *)(set))->fd_count=0) #define EV_WIN_FD_ISSET(fd, set) __WSAFDIsSet((SOCKET)(fd), (fd_set *)(set)) #define EV_WIN_FD_COUNT(set) (((fd_set *)(set))->fd_count) /* ######################################## */ #else #define EV_WIN_FD_CLR FD_CLR #define EV_WIN_FD_SET FD_SET #define EV_WIN_FD_ZERO FD_ZERO #define EV_WIN_FD_ISSET FD_ISSET #endif static void select_modify (EV_P_ int fd, int oev, int nev) { if (oev == nev) return; { #if EV_SELECT_USE_FD_SET #if EV_SELECT_IS_WINSOCKET SOCKET handle = anfds [fd].handle; #else int handle = fd; #endif assert (("libev: fd >= FD_SETSIZE passed to fd_set-based select backend", fd < FD_SETSIZE)); /* FD_SET is broken on windows (it adds the fd to a set twice or more, * which eventually leads to overflows). Need to call it only on changes. 
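   * The (oev ^ nev) guards below implement exactly that: a handle is added
   * to or removed from an fd_set only when that specific event bit changed.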
*/ #if EV_SELECT_IS_WINSOCKET if ((oev ^ nev) & EV_READ) #endif if (nev & EV_READ) EV_WIN_FD_SET (handle, (fd_set *)vec_ri); else EV_WIN_FD_CLR (handle, (fd_set *)vec_ri); #if EV_SELECT_IS_WINSOCKET if ((oev ^ nev) & EV_WRITE) #endif if (nev & EV_WRITE) EV_WIN_FD_SET (handle, (fd_set *)vec_wi); else EV_WIN_FD_CLR (handle, (fd_set *)vec_wi); #else int word = fd / NFDBITS; fd_mask mask = 1UL << (fd % NFDBITS); if (expect_false (vec_max <= word)) { int new_max = word + 1; vec_ri = ev_realloc (vec_ri, new_max * NFDBYTES); vec_ro = ev_realloc (vec_ro, new_max * NFDBYTES); /* could free/malloc */ vec_wi = ev_realloc (vec_wi, new_max * NFDBYTES); vec_wo = ev_realloc (vec_wo, new_max * NFDBYTES); /* could free/malloc */ #ifdef _WIN32 vec_eo = ev_realloc (vec_eo, new_max * NFDBYTES); /* could free/malloc */ #endif for (; vec_max < new_max; ++vec_max) ((fd_mask *)vec_ri) [vec_max] = ((fd_mask *)vec_wi) [vec_max] = 0; } ((fd_mask *)vec_ri) [word] |= mask; if (!(nev & EV_READ)) ((fd_mask *)vec_ri) [word] &= ~mask; ((fd_mask *)vec_wi) [word] |= mask; if (!(nev & EV_WRITE)) ((fd_mask *)vec_wi) [word] &= ~mask; #endif } } static void select_poll (EV_P_ ev_tstamp timeout) { struct timeval tv; int res; int fd_setsize; EV_RELEASE_CB; EV_TV_SET (tv, timeout); #if EV_SELECT_USE_FD_SET fd_setsize = sizeof (fd_set); #else fd_setsize = vec_max * NFDBYTES; #endif memcpy (vec_ro, vec_ri, fd_setsize); memcpy (vec_wo, vec_wi, fd_setsize); #ifdef _WIN32 /* pass in the write set as except set. * the idea behind this is to work around a windows bug that causes * errors to be reported as an exception and not by setting * the writable bit. this is so uncontrollably lame. */ memcpy (vec_eo, vec_wi, fd_setsize); res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, (fd_set *)vec_eo, &tv); #elif EV_SELECT_USE_FD_SET fd_setsize = anfdmax < FD_SETSIZE ? anfdmax : FD_SETSIZE; res = select (fd_setsize, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv); #else res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv); #endif EV_ACQUIRE_CB; if (expect_false (res < 0)) { #if EV_SELECT_IS_WINSOCKET errno = WSAGetLastError (); #endif #ifdef WSABASEERR /* on windows, select returns incompatible error codes, fix this */ if (errno >= WSABASEERR && errno < WSABASEERR + 1000) if (errno == WSAENOTSOCK) errno = EBADF; else errno -= WSABASEERR; #endif #ifdef _WIN32 /* select on windows erroneously returns EINVAL when no fd sets have been * provided (this is documented). what microsoft doesn't tell you that this bug * exists even when the fd sets _are_ provided, so we have to check for this bug * here and emulate by sleeping manually. * we also get EINVAL when the timeout is invalid, but we ignore this case here * and assume that EINVAL always means: you have to wait manually. */ if (errno == EINVAL) { if (timeout) { unsigned long ms = (unsigned long)(timeout * 1e3); SleepEx (ms ? 
ms : 1, TRUE); } return; } #endif if (errno == EBADF) fd_ebadf (EV_A); else if (errno == ENOMEM && !syserr_cb) fd_enomem (EV_A); else if (errno != EINTR) ev_syserr ("(libev) select"); return; } #if EV_SELECT_USE_FD_SET { int fd; for (fd = 0; fd < anfdmax; ++fd) if (anfds [fd].events) { int events = 0; #if EV_SELECT_IS_WINSOCKET SOCKET handle = anfds [fd].handle; #else int handle = fd; #endif if (EV_WIN_FD_ISSET (handle, (fd_set *)vec_ro)) events |= EV_READ; if (EV_WIN_FD_ISSET (handle, (fd_set *)vec_wo)) events |= EV_WRITE; #ifdef _WIN32 if (EV_WIN_FD_ISSET (handle, (fd_set *)vec_eo)) events |= EV_WRITE; #endif if (expect_true (events)) fd_event (EV_A_ fd, events); } } #else { int word, bit; for (word = vec_max; word--; ) { fd_mask word_r = ((fd_mask *)vec_ro) [word]; fd_mask word_w = ((fd_mask *)vec_wo) [word]; #ifdef _WIN32 word_w |= ((fd_mask *)vec_eo) [word]; #endif if (word_r || word_w) for (bit = NFDBITS; bit--; ) { fd_mask mask = 1UL << bit; int events = 0; events |= word_r & mask ? EV_READ : 0; events |= word_w & mask ? EV_WRITE : 0; if (expect_true (events)) fd_event (EV_A_ word * NFDBITS + bit, events); } } } #endif } inline_size int select_init (EV_P_ int flags) { backend_mintime = 1e-6; backend_modify = select_modify; backend_poll = select_poll; #if EV_SELECT_USE_FD_SET vec_ri = ev_malloc (sizeof (fd_set)); EV_WIN_FD_ZERO ((fd_set *)vec_ri); vec_ro = ev_malloc (sizeof (fd_set)); vec_wi = ev_malloc (sizeof (fd_set)); EV_WIN_FD_ZERO ((fd_set *)vec_wi); vec_wo = ev_malloc (sizeof (fd_set)); #ifdef _WIN32 vec_eo = ev_malloc (sizeof (fd_set)); #endif #else vec_max = 0; vec_ri = 0; vec_ro = 0; vec_wi = 0; vec_wo = 0; #ifdef _WIN32 vec_eo = 0; #endif #endif return EVBACKEND_SELECT; } inline_size void select_destroy (EV_P) { ev_free (vec_ri); ev_free (vec_ro); ev_free (vec_wi); ev_free (vec_wo); #ifdef _WIN32 ev_free (vec_eo); #endif } cool.io-1.8.1/ext/libev/test_libev_win32.c0000644000004100000410000000722614632135713020356 0ustar www-datawww-data // a single header file is required #include #include #include // every watcher type has its own typedef'd struct // with the name ev_TYPE ev_io stdin_watcher; ev_timer timeout_watcher; // all watcher callbacks have a similar signature // this callback is called when data is readable on stdin static void stdin_cb (EV_P_ ev_io *w, int revents) { puts ("stdin ready or done or something"); // for one-shot events, one must manually stop the watcher // with its corresponding stop function. //ev_io_stop (EV_A_ w); // this causes all nested ev_loop's to stop iterating //ev_unloop (EV_A_ EVUNLOOP_ALL); } // another callback, this time for a time-out static void timeout_cb (EV_P_ ev_timer *w, int revents) { puts ("timeout"); // this causes the innermost ev_loop to stop iterating ev_unloop (EV_A_ EVUNLOOP_ONE); } #define FD_SETSIZE 1024 #include #include #include int get_server_fd() { //---------------------- // Initialize Winsock. WSADATA wsaData; int iResult = WSAStartup(MAKEWORD(2,2), &wsaData); if (iResult != NO_ERROR) { printf("Error at WSAStartup()\n"); return 1; } //---------------------- // Create a SOCKET for listening for // incoming connection requests. SOCKET ListenSocket; ListenSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (ListenSocket == INVALID_SOCKET) { printf("Error at socket(): %ld\n", WSAGetLastError()); WSACleanup(); return 1; } printf("socket returned %d\n", ListenSocket); //---------------------- // The sockaddr_in structure specifies the address family, // IP address, and port for the socket that is being bound. 
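  // NB: this test hardcodes 127.0.0.1:4444 below; pick a different free
  // local port if 4444 is already in use on the test machine.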
sockaddr_in service; service.sin_family = AF_INET; service.sin_addr.s_addr = inet_addr("127.0.0.1"); service.sin_port = htons(4444); if (bind( ListenSocket, (SOCKADDR*) &service, sizeof(service)) == SOCKET_ERROR) { printf("bind() failed.\n"); closesocket(ListenSocket); WSACleanup(); return 1; } //---------------------- // Listen for incoming connection requests. // on the created socket if (listen( ListenSocket, 1 ) == SOCKET_ERROR) { printf("Error listening on socket.\n"); closesocket(ListenSocket); WSACleanup(); return 1; } printf("sock and osf handle are %d %d, error is \n", ListenSocket, _get_osfhandle (ListenSocket)); // -1 is invalid file handle: http://msdn.microsoft.com/en-us/library/ks2530z6.aspx printf("err was %d\n", WSAGetLastError()); //---------------------- return ListenSocket; } int main (void) { struct ev_loop *loopy = ev_default_loop(0); int fd = get_server_fd(); int fd_real = _open_osfhandle(fd, NULL); int conv = _get_osfhandle(fd_real); printf("got server fd %d, loop %d, fd_real %d, that converted %d\n", fd, loopy, fd_real, conv); // accept(fd, NULL, NULL); // initialise an io watcher, then start it // this one will watch for stdin to become readable ev_io_init (&stdin_watcher, stdin_cb, /*STDIN_FILENO*/ conv, EV_READ); ev_io_start (loopy, &stdin_watcher); // initialise a timer watcher, then start it // simple non-repeating 5.5 second timeout //ev_timer_init (&timeout_watcher, timeout_cb, 15.5, 0.); //ev_timer_start (loopy, &timeout_watcher); printf("starting loop\n"); // now wait for events to arrive ev_loop (loopy, 0); // unloop was called, so exit return 0; } cool.io-1.8.1/ext/libev/ev_wrap.h0000644000004100000410000001260614632135713016642 0ustar www-datawww-data/* DO NOT EDIT, automatically generated by update_ev_wrap */ #ifndef EV_WRAP_H #define EV_WRAP_H #define acquire_cb ((loop)->acquire_cb) #define activecnt ((loop)->activecnt) #define anfdmax ((loop)->anfdmax) #define anfds ((loop)->anfds) #define async_pending ((loop)->async_pending) #define asynccnt ((loop)->asynccnt) #define asyncmax ((loop)->asyncmax) #define asyncs ((loop)->asyncs) #define backend ((loop)->backend) #define backend_fd ((loop)->backend_fd) #define backend_mintime ((loop)->backend_mintime) #define backend_modify ((loop)->backend_modify) #define backend_poll ((loop)->backend_poll) #define checkcnt ((loop)->checkcnt) #define checkmax ((loop)->checkmax) #define checks ((loop)->checks) #define cleanupcnt ((loop)->cleanupcnt) #define cleanupmax ((loop)->cleanupmax) #define cleanups ((loop)->cleanups) #define curpid ((loop)->curpid) #define epoll_epermcnt ((loop)->epoll_epermcnt) #define epoll_epermmax ((loop)->epoll_epermmax) #define epoll_eperms ((loop)->epoll_eperms) #define epoll_eventmax ((loop)->epoll_eventmax) #define epoll_events ((loop)->epoll_events) #define evpipe ((loop)->evpipe) #define fdchangecnt ((loop)->fdchangecnt) #define fdchangemax ((loop)->fdchangemax) #define fdchanges ((loop)->fdchanges) #define forkcnt ((loop)->forkcnt) #define forkmax ((loop)->forkmax) #define forks ((loop)->forks) #define fs_2625 ((loop)->fs_2625) #define fs_fd ((loop)->fs_fd) #define fs_hash ((loop)->fs_hash) #define fs_w ((loop)->fs_w) #define idleall ((loop)->idleall) #define idlecnt ((loop)->idlecnt) #define idlemax ((loop)->idlemax) #define idles ((loop)->idles) #define invoke_cb ((loop)->invoke_cb) #define io_blocktime ((loop)->io_blocktime) #define iocp ((loop)->iocp) #define kqueue_changecnt ((loop)->kqueue_changecnt) #define kqueue_changemax ((loop)->kqueue_changemax) #define kqueue_changes 
((loop)->kqueue_changes) #define kqueue_eventmax ((loop)->kqueue_eventmax) #define kqueue_events ((loop)->kqueue_events) #define kqueue_fd_pid ((loop)->kqueue_fd_pid) #define loop_count ((loop)->loop_count) #define loop_depth ((loop)->loop_depth) #define loop_done ((loop)->loop_done) #define mn_now ((loop)->mn_now) #define now_floor ((loop)->now_floor) #define origflags ((loop)->origflags) #define pending_w ((loop)->pending_w) #define pendingcnt ((loop)->pendingcnt) #define pendingmax ((loop)->pendingmax) #define pendingpri ((loop)->pendingpri) #define pendings ((loop)->pendings) #define periodiccnt ((loop)->periodiccnt) #define periodicmax ((loop)->periodicmax) #define periodics ((loop)->periodics) #define pipe_w ((loop)->pipe_w) #define pipe_write_skipped ((loop)->pipe_write_skipped) #define pipe_write_wanted ((loop)->pipe_write_wanted) #define pollcnt ((loop)->pollcnt) #define pollidxmax ((loop)->pollidxmax) #define pollidxs ((loop)->pollidxs) #define pollmax ((loop)->pollmax) #define polls ((loop)->polls) #define port_eventmax ((loop)->port_eventmax) #define port_events ((loop)->port_events) #define postfork ((loop)->postfork) #define preparecnt ((loop)->preparecnt) #define preparemax ((loop)->preparemax) #define prepares ((loop)->prepares) #define release_cb ((loop)->release_cb) #define rfeedcnt ((loop)->rfeedcnt) #define rfeedmax ((loop)->rfeedmax) #define rfeeds ((loop)->rfeeds) #define rtmn_diff ((loop)->rtmn_diff) #define sig_pending ((loop)->sig_pending) #define sigfd ((loop)->sigfd) #define sigfd_set ((loop)->sigfd_set) #define sigfd_w ((loop)->sigfd_w) #define timeout_blocktime ((loop)->timeout_blocktime) #define timercnt ((loop)->timercnt) #define timermax ((loop)->timermax) #define timers ((loop)->timers) #define userdata ((loop)->userdata) #define vec_eo ((loop)->vec_eo) #define vec_max ((loop)->vec_max) #define vec_ri ((loop)->vec_ri) #define vec_ro ((loop)->vec_ro) #define vec_wi ((loop)->vec_wi) #define vec_wo ((loop)->vec_wo) #else #undef EV_WRAP_H #undef acquire_cb #undef activecnt #undef anfdmax #undef anfds #undef async_pending #undef asynccnt #undef asyncmax #undef asyncs #undef backend #undef backend_fd #undef backend_mintime #undef backend_modify #undef backend_poll #undef checkcnt #undef checkmax #undef checks #undef cleanupcnt #undef cleanupmax #undef cleanups #undef curpid #undef epoll_epermcnt #undef epoll_epermmax #undef epoll_eperms #undef epoll_eventmax #undef epoll_events #undef evpipe #undef fdchangecnt #undef fdchangemax #undef fdchanges #undef forkcnt #undef forkmax #undef forks #undef fs_2625 #undef fs_fd #undef fs_hash #undef fs_w #undef idleall #undef idlecnt #undef idlemax #undef idles #undef invoke_cb #undef io_blocktime #undef iocp #undef kqueue_changecnt #undef kqueue_changemax #undef kqueue_changes #undef kqueue_eventmax #undef kqueue_events #undef kqueue_fd_pid #undef loop_count #undef loop_depth #undef loop_done #undef mn_now #undef now_floor #undef origflags #undef pending_w #undef pendingcnt #undef pendingmax #undef pendingpri #undef pendings #undef periodiccnt #undef periodicmax #undef periodics #undef pipe_w #undef pipe_write_skipped #undef pipe_write_wanted #undef pollcnt #undef pollidxmax #undef pollidxs #undef pollmax #undef polls #undef port_eventmax #undef port_events #undef postfork #undef preparecnt #undef preparemax #undef prepares #undef release_cb #undef rfeedcnt #undef rfeedmax #undef rfeeds #undef rtmn_diff #undef sig_pending #undef sigfd #undef sigfd_set #undef sigfd_w #undef timeout_blocktime #undef timercnt #undef timermax 
#undef timers #undef userdata #undef vec_eo #undef vec_max #undef vec_ri #undef vec_ro #undef vec_wi #undef vec_wo #endif cool.io-1.8.1/ext/cool.io/0000755000004100000410000000000014632135713015262 5ustar www-datawww-datacool.io-1.8.1/ext/cool.io/.gitignore0000644000004100000410000000004414632135713017250 0ustar www-datawww-dataMakefile mkmf.log *.o *.so *.bundle cool.io-1.8.1/ext/cool.io/utils.c0000644000004100000410000000521014632135713016564 0ustar www-datawww-data/* * Copyright (C) 2007 Tony Arcieri * You may redistribute this under the terms of the Ruby license. * See LICENSE for details */ #include "ruby.h" #ifdef HAVE_SYS_RESOURCE_H #include #endif #ifdef HAVE_SYS_SYSCTL_H #include #include #endif #ifdef HAVE_SYSCTLBYNAME #include #include #endif static VALUE mCoolio = Qnil; static VALUE cCoolio_Utils = Qnil; static VALUE Coolio_Utils_ncpus(VALUE self); static VALUE Coolio_Utils_maxfds(VALUE self); static VALUE Coolio_Utils_setmaxfds(VALUE self, VALUE max); /* * Assorted utility routines */ void Init_coolio_utils() { mCoolio = rb_define_module("Coolio"); cCoolio_Utils = rb_define_module_under(mCoolio, "Utils"); rb_define_singleton_method(cCoolio_Utils, "ncpus", Coolio_Utils_ncpus, 0); rb_define_singleton_method(cCoolio_Utils, "maxfds", Coolio_Utils_maxfds, 0); rb_define_singleton_method(cCoolio_Utils, "maxfds=", Coolio_Utils_setmaxfds, 1); } /** * call-seq: * Coolio::Utils.ncpus -> Integer * * Return the number of CPUs in the present system */ static VALUE Coolio_Utils_ncpus(VALUE self) { int ncpus = 0; #ifdef HAVE_LINUX_PROCFS #define HAVE_COOLIO_UTILS_NCPUS char buf[512]; FILE *cpuinfo; if(!(cpuinfo = fopen("/proc/cpuinfo", "r"))) rb_sys_fail("fopen"); while(fgets(buf, 512, cpuinfo)) { if(!strncmp(buf, "processor", 9)) ncpus++; } #endif #ifdef HAVE_SYSCTLBYNAME #define HAVE_COOLIO_UTILS_NCPUS size_t size = sizeof(int); if(sysctlbyname("hw.ncpu", &ncpus, &size, NULL, 0)) return INT2NUM(1); #endif #ifndef HAVE_COOLIO_UTILS_NCPUS rb_raise(rb_eRuntimeError, "operation not supported"); #endif return INT2NUM(ncpus); } /** * call-seq: * Coolio::Utils.maxfds -> Integer * * Return the maximum number of files descriptors available to the process */ static VALUE Coolio_Utils_maxfds(VALUE self) { #ifdef HAVE_SYS_RESOURCE_H struct rlimit rlim; if(getrlimit(RLIMIT_NOFILE, &rlim) < 0) rb_sys_fail("getrlimit"); return INT2NUM(rlim.rlim_cur); #endif #ifndef HAVE_SYS_RESOURCE_H rb_raise(rb_eRuntimeError, "operation not supported"); #endif } /** * call-seq: * Coolio::Utils.maxfds=(count) -> Integer * * Set the number of file descriptors available to the process. May require * superuser privileges. */ static VALUE Coolio_Utils_setmaxfds(VALUE self, VALUE max) { #ifdef HAVE_SYS_RESOURCE_H struct rlimit rlim; rlim.rlim_cur = NUM2INT(max); if(setrlimit(RLIMIT_NOFILE, &rlim) < 0) rb_sys_fail("setrlimit"); return max; #endif #ifndef HAVE_SYS_RESOURCE_H rb_raise(rb_eRuntimeError, "operation not supported"); #endif } cool.io-1.8.1/ext/cool.io/watcher.c0000644000004100000410000001740014632135713017065 0ustar www-datawww-data/* * Copyright (C) 2007-10 Tony Arcieri * You may redistribute this under the terms of the Ruby license. 
* See LICENSE for details */ #include "ruby.h" #include "ev_wrap.h" #include "cool.io.h" static VALUE mCoolio = Qnil; static VALUE cCoolio_Watcher = Qnil; static VALUE Coolio_Watcher_allocate(VALUE klass); static void Coolio_Watcher_mark(struct Coolio_Watcher *watcher); static void Coolio_Watcher_free(struct Coolio_Watcher *watcher); static VALUE Coolio_Watcher_initialize(VALUE self); static VALUE Coolio_Watcher_attach(VALUE self, VALUE loop); static VALUE Coolio_Watcher_detach(VALUE self); static VALUE Coolio_Watcher_enable(VALUE self); static VALUE Coolio_Watcher_disable(VALUE self); static VALUE Coolio_Watcher_evloop(VALUE self); static VALUE Coolio_Watcher_attached(VALUE self); static VALUE Coolio_Watcher_enabled(VALUE self); /* * Watchers are Coolio's event observers. They contain a set of callback * methods prefixed by on_* which fire whenever events occur. * * In order for a watcher to fire events it must be attached to a running * loop. Every watcher has an attach and detach method to control which * loop it's associated with. * * Watchers also have an enable and disable method. This allows a watcher * to temporarily ignore certain events while remaining attached to a given * loop. This is good for watchers which need to be toggled on and off. */ void Init_coolio_watcher() { mCoolio = rb_define_module("Coolio"); cCoolio_Watcher = rb_define_class_under(mCoolio, "Watcher", rb_cObject); rb_define_alloc_func(cCoolio_Watcher, Coolio_Watcher_allocate); rb_define_method(cCoolio_Watcher, "initialize", Coolio_Watcher_initialize, 0); rb_define_method(cCoolio_Watcher, "attach", Coolio_Watcher_attach, 1); rb_define_method(cCoolio_Watcher, "detach", Coolio_Watcher_detach, 0); rb_define_method(cCoolio_Watcher, "enable", Coolio_Watcher_enable, 0); rb_define_method(cCoolio_Watcher, "disable", Coolio_Watcher_disable, 0); rb_define_method(cCoolio_Watcher, "evloop", Coolio_Watcher_evloop, 0); rb_define_method(cCoolio_Watcher, "attached?", Coolio_Watcher_attached, 0); rb_define_method(cCoolio_Watcher, "enabled?", Coolio_Watcher_enabled, 0); } static VALUE Coolio_Watcher_allocate(VALUE klass) { struct Coolio_Watcher *watcher_data = (struct Coolio_Watcher *)xmalloc(sizeof(struct Coolio_Watcher)); watcher_data->loop = Qnil; watcher_data->enabled = 0; return Data_Wrap_Struct(klass, Coolio_Watcher_mark, Coolio_Watcher_free, watcher_data); } static void Coolio_Watcher_mark(struct Coolio_Watcher *watcher_data) { if(watcher_data->loop != Qnil) rb_gc_mark(watcher_data->loop); } static void Coolio_Watcher_free(struct Coolio_Watcher *watcher_data) { xfree(watcher_data); } static VALUE Coolio_Watcher_initialize(VALUE self) { rb_raise(rb_eRuntimeError, "watcher base class should not be initialized directly"); } /** * call-seq: * Coolio::Watcher.attach(loop) -> Coolio::Watcher * * Attach the watcher to the given Coolio::Loop. If the watcher is already attached * to a loop, detach it from the old one and attach it to the new one. */ static VALUE Coolio_Watcher_attach(VALUE self, VALUE loop) { VALUE loop_watchers, active_watchers; struct Coolio_Watcher *watcher_data; Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); watcher_data->enabled = 1; loop_watchers = rb_iv_get(loop, "@watchers"); if(loop_watchers == Qnil) { /* we should never get here */ loop_watchers = rb_hash_new(); rb_iv_set(loop, "@watchers", loop_watchers); } /* Add us to the loop's array of active watchers. 
This is mainly done * to keep the VM from garbage collecting watchers that are associated * with a loop (and also lets you see within Ruby which watchers are * associated with a given loop), but isn't really necessary for any * other reason */ rb_hash_aset(loop_watchers, self, Qtrue); active_watchers = rb_iv_get(loop, "@active_watchers"); if(active_watchers == Qnil) active_watchers = INT2NUM(1); else active_watchers = INT2NUM(NUM2INT(active_watchers) + 1); rb_iv_set(loop, "@active_watchers", active_watchers); return self; } /** * call-seq: * Coolio::Watcher.detach -> Coolio::Watcher * * Detach the watcher from its current Coolio::Loop. */ static VALUE Coolio_Watcher_detach(VALUE self) { struct Coolio_Watcher *watcher_data; struct Coolio_Loop *loop_data; VALUE loop_watchers; int i; Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); if(watcher_data->loop == Qnil) rb_raise(rb_eRuntimeError, "not attached to a loop"); loop_watchers = rb_iv_get(watcher_data->loop, "@watchers"); /* Remove us from the loop's array of active watchers. This likely * has negative performance and scalability characteristics as this * isn't an O(1) operation. Hopefully there's a better way... * Trying a hash for now... */ rb_hash_delete(loop_watchers, self); if(watcher_data->enabled) { rb_iv_set( watcher_data->loop, "@active_watchers", INT2NUM(NUM2INT(rb_iv_get(watcher_data->loop, "@active_watchers")) - 1) ); } watcher_data->enabled = 0; Data_Get_Struct(watcher_data->loop, struct Coolio_Loop, loop_data); /* Iterate through the events in the loop's event buffer. If there * are any pending events from this watcher, mark them NULL. The * dispatch loop will skip them. This prevents watchers earlier * in the event buffer from detaching others which may have pending * events in the buffer but get garbage collected in the meantime */ for(i = 0; i < loop_data->events_received; i++) { if(loop_data->eventbuf[i].watcher == self) loop_data->eventbuf[i].watcher = Qnil; } watcher_data->loop = Qnil; return self; } /** * call-seq: * Coolio::Watcher.enable -> Coolio::Watcher * * Re-enable a watcher which has been temporarily disabled. See the * disable method for a more thorough explanation. */ static VALUE Coolio_Watcher_enable(VALUE self) { struct Coolio_Watcher *watcher_data; Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); if(watcher_data->enabled) rb_raise(rb_eRuntimeError, "already enabled"); watcher_data->enabled = 1; rb_iv_set( watcher_data->loop, "@active_watchers", INT2NUM(NUM2INT(rb_iv_get(watcher_data->loop, "@active_watchers")) + 1) ); return self; } /** * call-seq: * Coolio::Watcher.disable -> Coolio::Watcher * * Temporarily disable an event watcher which is attached to a loop. * This is useful if you wish to toggle event monitoring on and off. */ static VALUE Coolio_Watcher_disable(VALUE self) { struct Coolio_Watcher *watcher_data; Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); if(!watcher_data->enabled) rb_raise(rb_eRuntimeError, "already disabled"); watcher_data->enabled = 0; rb_iv_set( watcher_data->loop, "@active_watchers", INT2NUM(NUM2INT(rb_iv_get(watcher_data->loop, "@active_watchers")) - 1) ); return self; } /** * call-seq: * Coolio::Watcher.evloop -> Coolio::Loop * * Return the loop to which we're currently attached */ static VALUE Coolio_Watcher_evloop(VALUE self) { struct Coolio_Watcher *watcher_data; Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); return watcher_data->loop; } /** * call-seq: * Coolio::Watcher.attached? 
-> Boolean * * Is the watcher currently attached to an event loop? */ static VALUE Coolio_Watcher_attached(VALUE self) { return Coolio_Watcher_evloop(self) != Qnil; } /** * call-seq: * Coolio::Watcher.enabled? -> Boolean * * Is the watcher currently enabled? */ static VALUE Coolio_Watcher_enabled(VALUE self) { struct Coolio_Watcher *watcher_data; Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); return watcher_data->enabled ? Qtrue : Qfalse; } cool.io-1.8.1/ext/cool.io/libev.c0000644000004100000410000000026614632135713016533 0ustar www-datawww-data/* * Copyright (C) 2007 Tony Arcieri * You may redistribute this under the terms of the Ruby license. * See LICENSE for details */ #include "ev_wrap.h" #include "../libev/ev.c" cool.io-1.8.1/ext/cool.io/cool.io.h0000644000004100000410000000243514632135713017001 0ustar www-datawww-data/* * Copyright (C) 2007-10 Tony Arcieri * You may redistribute this under the terms of the Ruby license. * See LICENSE for details */ #ifndef COOLIO_H #define COOLIO_H #include "ruby.h" #if defined(HAVE_RUBY_IO_H) #include "ruby/io.h" #else #include "rubyio.h" #endif #ifdef GetReadFile #define FPTR_TO_FD(fptr) (fileno(GetReadFile(fptr))) #else #if !HAVE_RB_IO_T || (RUBY_VERSION_MAJOR == 1 && RUBY_VERSION_MINOR == 8) #define FPTR_TO_FD(fptr) fileno(fptr->f) #else #define FPTR_TO_FD(fptr) fptr->fd #endif #endif struct Coolio_Event { /* These values are used to extract events from libev callbacks */ VALUE watcher; int revents; }; struct Coolio_Loop { struct ev_loop *ev_loop; struct ev_timer timer; /* for timeouts */ int running; int events_received; int eventbuf_size; struct Coolio_Event *eventbuf; }; struct Coolio_Watcher { union { struct ev_io ev_io; struct ev_timer ev_timer; struct ev_stat ev_stat; } event_types; int enabled; VALUE loop; void (*dispatch_callback)(VALUE self, int revents); }; void Coolio_Loop_process_event(VALUE watcher, int revents); void Init_coolio_loop(); void Init_coolio_watcher(); void Init_coolio_iowatcher(); void Init_coolio_timer_watcher(); void Init_coolio_stat_watcher(); void Init_coolio_utils(); #endif cool.io-1.8.1/ext/cool.io/iowatcher.c0000644000004100000410000001317214632135713017417 0ustar www-datawww-data/* * Copyright (C) 2007-10 Tony Arcieri * You may redistribute this under the terms of the Ruby license. * See LICENSE for details */ #include "ruby.h" #if defined(HAVE_RUBY_IO_H) #include "ruby/io.h" #else #include "rubyio.h" #endif #include "ev_wrap.h" #include "cool.io.h" #include "watcher.h" static VALUE mCoolio = Qnil; static VALUE cCoolio_Watcher = Qnil; static VALUE cCoolio_Loop = Qnil; static VALUE cCoolio_IOWatcher = Qnil; static VALUE Coolio_IOWatcher_initialize(int argc, VALUE *argv, VALUE self); static VALUE Coolio_IOWatcher_attach(VALUE self, VALUE loop); static VALUE Coolio_IOWatcher_detach(VALUE self); static VALUE Coolio_IOWatcher_enable(VALUE self); static VALUE Coolio_IOWatcher_disable(VALUE self); static VALUE Coolio_IOWatcher_on_readable(VALUE self); static VALUE Coolio_IOWatcher_on_writable(VALUE self); static void Coolio_IOWatcher_libev_callback(struct ev_loop *ev_loop, struct ev_io *io, int revents); static void Coolio_IOWatcher_dispatch_callback(VALUE self, int revents); /* * Coolio::IOWatcher monitors Ruby IO objects for readability or writability. * This allows your application to block while the kernel is writing out * data and fill the read or write buffer whenever there is space available. 
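 *
 * A minimal usage sketch (hypothetical: the Echo subclass, the pipe, and
 * the event_loop variable are illustrations, not part of the API):
 *
 *   class Echo < Coolio::IOWatcher
 *     def on_readable
 *       puts "readable"          # fires when the pipe has data
 *     end
 *   end
 *
 *   reader, writer = IO.pipe
 *   event_loop = Coolio::Loop.new
 *   Echo.new(reader, 'r').attach(event_loop)
 *   writer.write("hi")
 *   event_loop.run_once          # dispatches on_readable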
*/ void Init_coolio_iowatcher() { mCoolio = rb_define_module("Coolio"); cCoolio_Watcher = rb_define_class_under(mCoolio, "Watcher", rb_cObject); cCoolio_IOWatcher = rb_define_class_under(mCoolio, "IOWatcher", cCoolio_Watcher); cCoolio_Loop = rb_define_class_under(mCoolio, "Loop", rb_cObject); rb_define_method(cCoolio_IOWatcher, "initialize", Coolio_IOWatcher_initialize, -1); rb_define_method(cCoolio_IOWatcher, "attach", Coolio_IOWatcher_attach, 1); rb_define_method(cCoolio_IOWatcher, "detach", Coolio_IOWatcher_detach, 0); rb_define_method(cCoolio_IOWatcher, "enable", Coolio_IOWatcher_enable, 0); rb_define_method(cCoolio_IOWatcher, "disable", Coolio_IOWatcher_disable, 0); rb_define_method(cCoolio_IOWatcher, "on_readable", Coolio_IOWatcher_on_readable, 0); rb_define_method(cCoolio_IOWatcher, "on_writable", Coolio_IOWatcher_on_writable, 0); } /** * call-seq: * Coolio::IOWatcher.initialize(IO, events = 'r') -> Coolio::IOWatcher * * Create a new Coolio::IOWatcher for the given IO object and add it to the given Coolio::Loop */ static VALUE Coolio_IOWatcher_initialize(int argc, VALUE *argv, VALUE self) { VALUE io, flags; char *flags_str; int events; struct Coolio_Watcher *watcher_data; rb_scan_args(argc, argv, "11", &io, &flags); if(flags != Qnil) flags_str = RSTRING_PTR(rb_String(flags)); else flags_str = "r"; if(!strcmp(flags_str, "r")) events = EV_READ; else if(!strcmp(flags_str, "w")) events = EV_WRITE; else if(!strcmp(flags_str, "rw")) events = EV_READ | EV_WRITE; else rb_raise(rb_eArgError, "invalid event type: '%s' (must be 'r', 'w', or 'rw')", flags_str); Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); io = rb_convert_type(io, T_FILE, "IO", "to_io"); watcher_data->dispatch_callback = Coolio_IOWatcher_dispatch_callback; #ifdef HAVE_RB_IO_DESCRIPTOR ev_io_init(&watcher_data->event_types.ev_io, Coolio_IOWatcher_libev_callback, rb_io_descriptor(io), events); #else #if defined(HAVE_RB_IO_T) rb_io_t *fptr; #else OpenFile *fptr; #endif GetOpenFile(io, fptr); ev_io_init(&watcher_data->event_types.ev_io, Coolio_IOWatcher_libev_callback, FPTR_TO_FD(fptr), events); #endif watcher_data->event_types.ev_io.data = (void *)self; return Qnil; } /** * call-seq: * Coolio::IOWatcher.attach(loop) -> Coolio::IOWatcher * * Attach the IO watcher to the given Coolio::Loop. If the watcher is already attached * to a loop, detach it from the old one and attach it to the new one. */ static VALUE Coolio_IOWatcher_attach(VALUE self, VALUE loop) { Watcher_Attach(io, Coolio_IOWatcher_detach, self, loop); return self; } /** * call-seq: * Coolio::IOWatcher.detach -> Coolio::IOWatcher * * Detach the IO watcher from its current Coolio::Loop. */ static VALUE Coolio_IOWatcher_detach(VALUE self) { Watcher_Detach(io, self); return self; } /** * call-seq: * Coolio::IOWatcher.enable -> Coolio::IOWatcher * * Re-enable an IO watcher which has been temporarily disabled. See the * disable method for a more thorough explanation. */ static VALUE Coolio_IOWatcher_enable(VALUE self) { Watcher_Enable(io, self); return self; } /** * call-seq: * Coolio::IOWatcher.disable -> Coolio::IOWatcher * * Temporarily disable an IO watcher which is attached to a loop. * This is useful if you wish to toggle event monitoring on and off. 
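 *
 * For example, given any attached watcher:
 *
 *   watcher.disable   # stay attached, but ignore events
 *   watcher.enable    # resume monitoring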
*/ static VALUE Coolio_IOWatcher_disable(VALUE self) { Watcher_Disable(io, self); return self; } /** * call-seq: * Coolio::IOWatcher#on_readable -> nil * * Called whenever the IO object associated with the IOWatcher is readable */ static VALUE Coolio_IOWatcher_on_readable(VALUE self) { return Qnil; } /** * call-seq: * Coolio::IOWatcher#on_writable -> nil * * Called whenever the IO object associated with the IOWatcher is writable */ static VALUE Coolio_IOWatcher_on_writable(VALUE self) { return Qnil; } /* libev callback */ static void Coolio_IOWatcher_libev_callback(struct ev_loop *ev_loop, struct ev_io *io, int revents) { Coolio_Loop_process_event((VALUE)io->data, revents); } /* Coolio::Loop dispatch callback */ static void Coolio_IOWatcher_dispatch_callback(VALUE self, int revents) { if(revents & EV_READ) rb_funcall(self, rb_intern("on_readable"), 0); else if(revents & EV_WRITE) rb_funcall(self, rb_intern("on_writable"), 0); else rb_raise(rb_eRuntimeError, "unknown revents value for ev_io: %d", revents); } cool.io-1.8.1/ext/cool.io/stat_watcher.c0000644000004100000410000002007014632135713020115 0ustar www-datawww-data/* * Copyright (C) 2009-10 Tony Arcieri * You may redistribute this under the terms of the Ruby license. * See LICENSE for details */ #include "ruby.h" #include "ev_wrap.h" #include "cool.io.h" #include "watcher.h" static VALUE mCoolio = Qnil; static VALUE cCoolio_Watcher = Qnil; static VALUE cCoolio_StatWatcher = Qnil; static VALUE cCoolio_StatInfo = Qnil; static VALUE cCoolio_Loop = Qnil; static VALUE Coolio_StatWatcher_initialize(int argc, VALUE *argv, VALUE self); static VALUE Coolio_StatWatcher_attach(VALUE self, VALUE loop); static VALUE Coolio_StatWatcher_detach(VALUE self); static VALUE Coolio_StatWatcher_enable(VALUE self); static VALUE Coolio_StatWatcher_disable(VALUE self); static VALUE Coolio_StatWatcher_on_change(VALUE self, VALUE previous, VALUE current); static VALUE Coolio_StatWatcher_path(VALUE self); static VALUE Coolio_StatInfo_build(ev_statdata *statdata_struct); static void Coolio_StatWatcher_libev_callback(struct ev_loop *ev_loop, struct ev_stat *stat, int revents); static void Coolio_StatWatcher_dispatch_callback(VALUE self, int revents); /* * Coolio::StatWatcher lets you create either one-shot or periodic stats which * run within Coolio's event loop. It's useful for creating timeouts or * events which fire periodically. 
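 *
 * Concretely, a StatWatcher monitors a filesystem path and invokes
 * on_change with the previous and current stat data whenever that path
 * changes (see #initialize below). A sketch -- the ConfigWatcher subclass
 * and the event_loop variable are illustrative only:
 *
 *   class ConfigWatcher < Coolio::StatWatcher
 *     def on_change(previous, current)
 *       puts "#{path} mtime: #{previous.mtime} -> #{current.mtime}"
 *     end
 *   end
 *
 *   ConfigWatcher.new("/tmp/app.conf", 5).attach(event_loop)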
**/ void Init_coolio_stat_watcher() { mCoolio = rb_define_module("Coolio"); cCoolio_Watcher = rb_define_class_under(mCoolio, "Watcher", rb_cObject); cCoolio_StatWatcher = rb_define_class_under(mCoolio, "StatWatcher", cCoolio_Watcher); cCoolio_StatInfo = rb_struct_define("StatInfo", "mtime", "ctime", "atime", "dev", "ino", "mode", "nlink", "uid", "guid", "rdev", "size", "blksize", "blocks", NULL); cCoolio_Loop = rb_define_class_under(mCoolio, "Loop", rb_cObject); rb_define_method(cCoolio_StatWatcher, "initialize", Coolio_StatWatcher_initialize, -1); rb_define_method(cCoolio_StatWatcher, "attach", Coolio_StatWatcher_attach, 1); rb_define_method(cCoolio_StatWatcher, "detach", Coolio_StatWatcher_detach, 0); rb_define_method(cCoolio_StatWatcher, "enable", Coolio_StatWatcher_enable, 0); rb_define_method(cCoolio_StatWatcher, "disable", Coolio_StatWatcher_disable, 0); rb_define_method(cCoolio_StatWatcher, "on_change", Coolio_StatWatcher_on_change, 2); rb_define_method(cCoolio_StatWatcher, "path", Coolio_StatWatcher_path, 0); } /** * call-seq: * Coolio::StatWatcher.initialize(path, interval = 0) -> Coolio::StatWatcher * * Create a new Coolio::StatWatcher for the given path. This will monitor the * given path for changes at the filesystem level. The interval argument * specified how often in seconds the path should be polled for changes. * Setting interval to zero uses an "automatic" value (typically around 5 * seconds) which optimizes performance. Otherwise, values less than * 0.1 are not particularly meaningful. Where available (at present, on Linux) * high performance file monitoring interfaces will be used instead of polling. */ static VALUE Coolio_StatWatcher_initialize(int argc, VALUE *argv, VALUE self) { VALUE path, interval; struct Coolio_Watcher *watcher_data; rb_scan_args(argc, argv, "11", &path, &interval); if(interval != Qnil) interval = rb_convert_type(interval, T_FLOAT, "Float", "to_f"); path = rb_String(path); rb_iv_set(self, "@path", path); Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); watcher_data->dispatch_callback = Coolio_StatWatcher_dispatch_callback; ev_stat_init( &watcher_data->event_types.ev_stat, Coolio_StatWatcher_libev_callback, RSTRING_PTR(path), interval == Qnil ? 0 : NUM2DBL(interval) ); watcher_data->event_types.ev_stat.data = (void *)self; return Qnil; } /** * call-seq: * Coolio::StatWatcher.attach(loop) -> Coolio::StatWatcher * * Attach the stat watcher to the given Coolio::Loop. If the watcher is already * attached to a loop, detach it from the old one and attach it to the new one. */ static VALUE Coolio_StatWatcher_attach(VALUE self, VALUE loop) { ev_tstamp interval, timeout; struct Coolio_Loop *loop_data; struct Coolio_Watcher *watcher_data; if(!rb_obj_is_kind_of(loop, cCoolio_Loop)) rb_raise(rb_eArgError, "expected loop to be an instance of Coolio::Loop, not %s", RSTRING_PTR(rb_inspect(loop))); Data_Get_Struct(loop, struct Coolio_Loop, loop_data); Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); if(watcher_data->loop != Qnil) Coolio_StatWatcher_detach(self); watcher_data->loop = loop; ev_stat_start(loop_data->ev_loop, &watcher_data->event_types.ev_stat); rb_call_super(1, &loop); return self; } /** * call-seq: * Coolio::StatWatcher.detach -> Coolio::StatWatcher * * Detach the stat watcher from its current Coolio::Loop. 
*/ static VALUE Coolio_StatWatcher_detach(VALUE self) { Watcher_Detach(stat, self); return self; } /** * call-seq: * Coolio::StatWatcher.enable -> Coolio::StatWatcher * * Re-enable a stat watcher which has been temporarily disabled. See the * disable method for a more thorough explanation. */ static VALUE Coolio_StatWatcher_enable(VALUE self) { Watcher_Enable(stat, self); return self; } /** * call-seq: * Coolio::StatWatcher.disable -> Coolio::StatWatcher * * Temporarily disable a stat watcher which is attached to a loop. * This is useful if you wish to toggle event monitoring on and off. */ static VALUE Coolio_StatWatcher_disable(VALUE self) { Watcher_Disable(stat, self); return self; } /** * call-seq: * Coolio::StatWatcher#on_change -> nil * * Called whenever the status of the given path changes */ static VALUE Coolio_StatWatcher_on_change(VALUE self, VALUE previous, VALUE current) { return Qnil; } /** * call-seq: * Coolio::StatWatcher#path -> String * * Retrieve the path associated with this StatWatcher */ static VALUE Coolio_StatWatcher_path(VALUE self) { return rb_iv_get(self, "@path"); } /* libev callback */ static void Coolio_StatWatcher_libev_callback(struct ev_loop *ev_loop, struct ev_stat *stat, int revents) { Coolio_Loop_process_event((VALUE)stat->data, revents); } /* Coolio::Loop dispatch callback */ static void Coolio_StatWatcher_dispatch_callback(VALUE self, int revents) { struct Coolio_Watcher *watcher_data; Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); VALUE previous_statdata = Coolio_StatInfo_build(&watcher_data->event_types.ev_stat.prev); VALUE current_statdata = Coolio_StatInfo_build(&watcher_data->event_types.ev_stat.attr); rb_funcall(self, rb_intern("on_change"), 2, previous_statdata, current_statdata); } /** * Convience method to build StatInfo structs given an ev_statdata * */ static VALUE Coolio_StatInfo_build(ev_statdata *statdata_struct) { VALUE at_method = rb_intern("at"); VALUE cTime = rb_const_get(rb_cObject, rb_intern("Time")); VALUE mtime = Qnil; VALUE ctime = Qnil; VALUE atime = Qnil; VALUE dev = Qnil; VALUE ino = Qnil; VALUE mode = Qnil; VALUE nlink = Qnil; VALUE uid = Qnil; VALUE gid = Qnil; VALUE rdev = Qnil; VALUE size = Qnil; VALUE blksize = Qnil; VALUE blocks = Qnil; mtime = rb_funcall(cTime, at_method, 1, INT2NUM(statdata_struct->st_mtime)); ctime = rb_funcall(cTime, at_method, 1, INT2NUM(statdata_struct->st_ctime)); atime = rb_funcall(cTime, at_method, 1, INT2NUM(statdata_struct->st_atime)); dev = INT2NUM(statdata_struct->st_dev); ino = INT2NUM(statdata_struct->st_ino); mode = INT2NUM(statdata_struct->st_mode); nlink = INT2NUM(statdata_struct->st_nlink); uid = INT2NUM(statdata_struct->st_uid); gid = INT2NUM(statdata_struct->st_gid); rdev = INT2NUM(statdata_struct->st_rdev); size = INT2NUM(statdata_struct->st_size); #ifdef HAVE_ST_BLKSIZE blksize = INT2NUM(statdata_struct->st_blksize); blocks = INT2NUM(statdata_struct->st_blocks); #endif return rb_struct_new(cCoolio_StatInfo, mtime, ctime, atime, dev, ino, mode, nlink, uid, gid, rdev, size, blksize, blocks, NULL); } cool.io-1.8.1/ext/cool.io/loop.c0000644000004100000410000002060514632135713016402 0ustar www-datawww-data/* * Copyright (C) 2007-10 Tony Arcieri * You may redistribute this under the terms of the Ruby license. 
* See LICENSE for details */ #include #include "ruby.h" #include "ev_wrap.h" #include "cool.io.h" static VALUE mCoolio = Qnil; static VALUE cCoolio_Loop = Qnil; static VALUE Coolio_Loop_allocate(VALUE klass); static void Coolio_Loop_mark(struct Coolio_Loop *loop); static void Coolio_Loop_free(struct Coolio_Loop *loop); static VALUE Coolio_Loop_ev_loop_new(VALUE self, VALUE flags); static VALUE Coolio_Loop_run_once(int argc, VALUE *argv, VALUE self); static VALUE Coolio_Loop_run_nonblock(VALUE self); static void Coolio_Loop_timeout_callback(struct ev_loop *ev_loop, struct ev_timer *timer, int revents); static void Coolio_Loop_dispatch_events(struct Coolio_Loop *loop_data); #define DEFAULT_EVENTBUF_SIZE 32 #define RUN_LOOP(loop_data, options) \ loop_data->running = 1; \ ev_loop(loop_data->ev_loop, options); \ loop_data->running = 0; /* * Coolio::Loop represents an event loop. Event watchers can be attached and * unattached. When an event loop is run, all currently attached watchers * are monitored for events, and their respective callbacks are signaled * whenever events occur. */ void Init_coolio_loop() { mCoolio = rb_define_module("Coolio"); cCoolio_Loop = rb_define_class_under(mCoolio, "Loop", rb_cObject); rb_define_alloc_func(cCoolio_Loop, Coolio_Loop_allocate); rb_define_private_method(cCoolio_Loop, "ev_loop_new", Coolio_Loop_ev_loop_new, 1); rb_define_method(cCoolio_Loop, "run_once", Coolio_Loop_run_once, -1); rb_define_method(cCoolio_Loop, "run_nonblock", Coolio_Loop_run_nonblock, 0); } static VALUE Coolio_Loop_allocate(VALUE klass) { struct Coolio_Loop *loop = (struct Coolio_Loop *)xmalloc(sizeof(struct Coolio_Loop)); loop->ev_loop = 0; ev_init(&loop->timer, Coolio_Loop_timeout_callback); loop->running = 0; loop->events_received = 0; loop->eventbuf_size = DEFAULT_EVENTBUF_SIZE; loop->eventbuf = (struct Coolio_Event *)xmalloc(sizeof(struct Coolio_Event) * DEFAULT_EVENTBUF_SIZE); return Data_Wrap_Struct(klass, Coolio_Loop_mark, Coolio_Loop_free, loop); } static void Coolio_Loop_mark(struct Coolio_Loop *loop) { } static void Coolio_Loop_free(struct Coolio_Loop *loop) { if(!loop->ev_loop) return; ev_loop_destroy(loop->ev_loop); xfree(loop->eventbuf); xfree(loop); } /* Wrapper for populating a Coolio_Loop struct with a new event loop */ static VALUE Coolio_Loop_ev_loop_new(VALUE self, VALUE flags) { struct Coolio_Loop *loop_data; Data_Get_Struct(self, struct Coolio_Loop, loop_data); if(loop_data->ev_loop) rb_raise(rb_eRuntimeError, "loop already initialized"); loop_data->ev_loop = ev_loop_new(NUM2INT(flags)); return Qnil; } /* libev callback for receiving events */ void Coolio_Loop_process_event(VALUE watcher, int revents) { struct Coolio_Loop *loop_data; struct Coolio_Watcher *watcher_data; /* The Global VM lock isn't held right now, but hopefully * we can still do this safely */ Data_Get_Struct(watcher, struct Coolio_Watcher, watcher_data); Data_Get_Struct(watcher_data->loop, struct Coolio_Loop, loop_data); /* Well, what better place to explain how this all works than * where the most wonky and convoluted stuff is going on! * * Our call path up to here looks a little something like: * * -> release GVL -> event syscall -> libev callback * (GVL = Global VM Lock) ^^^ You are here * * We released the GVL in the Coolio_Loop_run_once() function * so other Ruby threads can run while we make a blocking * system call (one of epoll, kqueue, port, poll, or select, * depending on the platform). 
* * More specifically, this is a libev callback abstraction * called from a real libev callback in every watcher, * hence this function not being static. The real libev * callbacks are event-specific and handled in a watcher. * * For syscalls like epoll and kqueue, the kernel tells libev * a pointer (to a structure with a pointer) to the watcher * object. No data structure lookups are required at all * (beyond structs), it's smooth O(1) sailing the entire way. * Then libev calls out to the watcher's callback, which * calls this function. * * Now, you may be curious: if the watcher already knew what * event fired, why the hell is it telling the loop? Why * doesn't it just rb_funcall() the appropriate callback? * * Well, the problem is the Global VM Lock isn't held right * now, so we can't rb_funcall() anything. In order to get * it back we have to: * * stash event and return -> acquire GVL -> dispatch to Ruby * * Which is kinda ugly and confusing, but still gives us * an O(1) event loop whose heart is in the kernel itself. w00t! * * So, stash the event in the loop's data struct. When we return, * the ev_loop() call made via the RUN_LOOP macro in the * Coolio_Loop_run_once() function below will also return, at which * point the GVL is reacquired and we can call out to Ruby */ /* Grow the event buffer if it's too small */ if(loop_data->events_received >= loop_data->eventbuf_size) { loop_data->eventbuf_size *= 2; loop_data->eventbuf = (struct Coolio_Event *)xrealloc( loop_data->eventbuf, sizeof(struct Coolio_Event) * loop_data->eventbuf_size ); } loop_data->eventbuf[loop_data->events_received].watcher = watcher; loop_data->eventbuf[loop_data->events_received].revents = revents; loop_data->events_received++; } /* Called whenever a timeout fires on the event loop */ static void Coolio_Loop_timeout_callback(struct ev_loop *ev_loop, struct ev_timer *timer, int revents) { /* We don't actually need to do anything here, the mere firing of the timer is sufficient to interrupt the selector. However, libev still wants a callback */ } /** * call-seq: * Coolio::Loop.run_once(timeout = nil) -> Integer * * Run the Coolio::Loop once, blocking until events are received (or the * optional timeout expires), and return the number of events dispatched. */ static VALUE Coolio_Loop_run_once(int argc, VALUE *argv, VALUE self) { VALUE timeout; VALUE nevents; struct Coolio_Loop *loop_data; rb_scan_args(argc, argv, "01", &timeout); if (timeout != Qnil && NUM2DBL(timeout) < 0) { rb_raise(rb_eArgError, "time interval must be positive"); } Data_Get_Struct(self, struct Coolio_Loop, loop_data); assert(loop_data->ev_loop && !loop_data->events_received); /* Implement the optional timeout (if any) as an ev_timer */ /* Using the technique described at http://pod.tst.eu/http://cvs.schmorp.de/libev/ev.pod#code_ev_timer_code_relative_and_opti, the timer is not stopped and restarted every time a timeout is specified; instead, it is stopped only when no timeout is specified. */ if (timeout != Qnil) { /* It seems libev is not a fan of timers being zero, so fudge a little */ loop_data->timer.repeat = NUM2DBL(timeout) + 0.0001; ev_timer_again(loop_data->ev_loop, &loop_data->timer); } else { ev_timer_stop(loop_data->ev_loop, &loop_data->timer); } /* libev is patched to release the GIL when it makes its system call */ RUN_LOOP(loop_data, EVLOOP_ONESHOT); Coolio_Loop_dispatch_events(loop_data); nevents = INT2NUM(loop_data->events_received); loop_data->events_received = 0; return nevents; } /** * call-seq: * Coolio::Loop.run_nonblock -> Integer * * Run the Coolio::Loop once, but return immediately if there are no pending * events. Returns the number of events dispatched.
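 *
 * A brief usage sketch (illustrative, not part of this file):
 *
 *   loop = Coolio::Loop.new
 *   loop.run_nonblock # => number of events dispatched (0 if none were pending)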
*/ static VALUE Coolio_Loop_run_nonblock(VALUE self) { struct Coolio_Loop *loop_data; VALUE nevents; Data_Get_Struct(self, struct Coolio_Loop, loop_data); assert(loop_data->ev_loop && !loop_data->events_received); RUN_LOOP(loop_data, EVLOOP_NONBLOCK); Coolio_Loop_dispatch_events(loop_data); nevents = INT2NUM(loop_data->events_received); loop_data->events_received = 0; return nevents; } static void Coolio_Loop_dispatch_events(struct Coolio_Loop *loop_data) { int i; struct Coolio_Watcher *watcher_data; for(i = 0; i < loop_data->events_received; i++) { /* A watcher with pending events may have been detached from the loop * during the dispatch process. If so, the watcher clears the pending * events, so skip over them */ if(loop_data->eventbuf[i].watcher == Qnil) continue; Data_Get_Struct(loop_data->eventbuf[i].watcher, struct Coolio_Watcher, watcher_data); watcher_data->dispatch_callback(loop_data->eventbuf[i].watcher, loop_data->eventbuf[i].revents); } } cool.io-1.8.1/ext/cool.io/watcher.h0000644000004100000410000000444514632135713017077 0ustar www-datawww-data/* * Copyright (C) 2007-10 Tony Arcieri * You may redistribute this under the terms of the Ruby license. * See LICENSE for details */ #ifndef WATCHER_H #define WATCHER_H #define Watcher_Attach(watcher_type, detach_func, watcher, loop) \ struct Coolio_Watcher *watcher_data; \ struct Coolio_Loop *loop_data; \ \ if(!rb_obj_is_kind_of(loop, cCoolio_Loop)) \ rb_raise(rb_eArgError, "expected loop to be an instance of Coolio::Loop, not %s", RSTRING_PTR(rb_inspect(loop))); \ \ Data_Get_Struct(watcher, struct Coolio_Watcher, watcher_data); \ Data_Get_Struct(loop, struct Coolio_Loop, loop_data); \ \ if(watcher_data->loop != Qnil) \ detach_func(watcher); \ \ watcher_data->loop = loop; \ ev_##watcher_type##_start(loop_data->ev_loop, &watcher_data->event_types.ev_##watcher_type); \ rb_call_super(1, &loop) #define Watcher_Detach(watcher_type, watcher) \ struct Coolio_Watcher *watcher_data; \ struct Coolio_Loop *loop_data; \ \ Data_Get_Struct(watcher, struct Coolio_Watcher, watcher_data); \ \ if(watcher_data->loop == Qnil) \ rb_raise(rb_eRuntimeError, "not attached to a loop"); \ \ Data_Get_Struct(watcher_data->loop, struct Coolio_Loop, loop_data); \ \ ev_##watcher_type##_stop(loop_data->ev_loop, &watcher_data->event_types.ev_##watcher_type); \ rb_call_super(0, 0) #define Watcher_Enable(watcher_type, watcher) \ struct Coolio_Watcher *watcher_data; \ struct Coolio_Loop *loop_data; \ \ Data_Get_Struct(watcher, struct Coolio_Watcher, watcher_data); \ \ if(watcher_data->loop == Qnil) \ rb_raise(rb_eRuntimeError, "not attached to a loop"); \ \ rb_call_super(0, 0); \ \ Data_Get_Struct(watcher_data->loop, struct Coolio_Loop, loop_data); \ \ ev_##watcher_type##_start(loop_data->ev_loop, &watcher_data->event_types.ev_##watcher_type) #define Watcher_Disable(watcher_type, watcher) \ struct Coolio_Watcher *watcher_data; \ struct Coolio_Loop *loop_data; \ \ Data_Get_Struct(watcher, struct Coolio_Watcher, watcher_data); \ \ if(watcher_data->loop == Qnil) \ rb_raise(rb_eRuntimeError, "not attached to a loop"); \ \ rb_call_super(0, 0); \ \ Data_Get_Struct(watcher_data->loop, struct Coolio_Loop, loop_data); \ \ ev_##watcher_type##_stop(loop_data->ev_loop, &watcher_data->event_types.ev_##watcher_type) #endif cool.io-1.8.1/ext/cool.io/timer_watcher.c0000644000004100000410000001537514632135713020276 0ustar www-datawww-data/* * Copyright (C) 2007 Tony Arcieri * You may redistribute this under the terms of the Ruby license. 
* See LICENSE for details */ #include "ruby.h" #include "ev_wrap.h" #include "cool.io.h" #include "watcher.h" static VALUE mCoolio = Qnil; static VALUE cCoolio_Watcher = Qnil; static VALUE cCoolio_TimerWatcher = Qnil; static VALUE cCoolio_Loop = Qnil; static VALUE Coolio_TimerWatcher_initialize(int argc, VALUE *argv, VALUE self); static VALUE Coolio_TimerWatcher_attach(VALUE self, VALUE loop); static VALUE Coolio_TimerWatcher_detach(VALUE self); static VALUE Coolio_TimerWatcher_enable(VALUE self); static VALUE Coolio_TimerWatcher_disable(VALUE self); static VALUE Coolio_TimerWatcher_reset(VALUE self); static VALUE Coolio_TimerWatcher_on_timer(VALUE self); static void Coolio_TimerWatcher_libev_callback(struct ev_loop *ev_loop, struct ev_timer *timer, int revents); static void Coolio_TimerWatcher_dispatch_callback(VALUE self, int revents); /* * Coolio::TimerWatcher lets you create either one-shot or periodic timers which * run within Coolio's event loop. It's useful for creating timeouts or * events which fire periodically. */ void Init_coolio_timer_watcher() { mCoolio = rb_define_module("Coolio"); cCoolio_Watcher = rb_define_class_under(mCoolio, "Watcher", rb_cObject); cCoolio_TimerWatcher = rb_define_class_under(mCoolio, "TimerWatcher", cCoolio_Watcher); cCoolio_Loop = rb_define_class_under(mCoolio, "Loop", rb_cObject); rb_define_method(cCoolio_TimerWatcher, "initialize", Coolio_TimerWatcher_initialize, -1); rb_define_method(cCoolio_TimerWatcher, "attach", Coolio_TimerWatcher_attach, 1); rb_define_method(cCoolio_TimerWatcher, "detach", Coolio_TimerWatcher_detach, 0); rb_define_method(cCoolio_TimerWatcher, "enable", Coolio_TimerWatcher_enable, 0); rb_define_method(cCoolio_TimerWatcher, "disable", Coolio_TimerWatcher_disable, 0); rb_define_method(cCoolio_TimerWatcher, "reset", Coolio_TimerWatcher_reset, 0); rb_define_method(cCoolio_TimerWatcher, "on_timer", Coolio_TimerWatcher_on_timer, 0); } /** * call-seq: * Coolio::TimerWatcher.initialize(interval, repeating = false) -> Coolio::TimerWatcher * * Create a new Coolio::TimerWatcher. Interval defines the duration in seconds * until the timer fires, and can be specified as an Integer or Float. Repeating * is a boolean indicating whether the timer is one-shot or should fire repeatedly * on the given interval. */ static VALUE Coolio_TimerWatcher_initialize(int argc, VALUE *argv, VALUE self) { VALUE interval, repeating; struct Coolio_Watcher *watcher_data; rb_scan_args(argc, argv, "11", &interval, &repeating); interval = rb_convert_type(interval, T_FLOAT, "Float", "to_f"); rb_iv_set(self, "@interval", interval); rb_iv_set(self, "@repeating", repeating); Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); watcher_data->dispatch_callback = Coolio_TimerWatcher_dispatch_callback; ev_timer_init( &watcher_data->event_types.ev_timer, Coolio_TimerWatcher_libev_callback, NUM2DBL(interval), repeating == Qtrue ? NUM2DBL(interval) : 0 ); watcher_data->event_types.ev_timer.data = (void *)self; return Qnil; } /** * call-seq: * Coolio::TimerWatcher#attach(loop) -> Coolio::TimerWatcher * * Attach the timer watcher to the given Coolio::Loop. If the watcher is already * attached to a loop, detach it from the old one and attach it to the new one.
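 *
 * A brief usage sketch (illustrative, not part of this file):
 *
 *   timer = Coolio::TimerWatcher.new(1.5, true) # fire every 1.5 seconds
 *   timer.attach(Coolio::Loop.default)          # on_timer fires once the loop runs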
*/ static VALUE Coolio_TimerWatcher_attach(VALUE self, VALUE loop) { ev_tstamp interval, timeout; struct Coolio_Loop *loop_data; struct Coolio_Watcher *watcher_data; if(!rb_obj_is_kind_of(loop, cCoolio_Loop)) rb_raise(rb_eArgError, "expected loop to be an instance of Coolio::Loop, not %s", RSTRING_PTR(rb_inspect(loop))); Data_Get_Struct(loop, struct Coolio_Loop, loop_data); Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); if(watcher_data->loop != Qnil) Coolio_TimerWatcher_detach(self); watcher_data->loop = loop; /* Calibrate timeout to account for potential drift */ interval = NUM2DBL(rb_iv_get(self, "@interval")); timeout = interval + ev_time() - ev_now(loop_data->ev_loop); ev_timer_set( &watcher_data->event_types.ev_timer, timeout, rb_iv_get(self, "@repeating") == Qtrue ? interval : 0 ); ev_timer_start(loop_data->ev_loop, &watcher_data->event_types.ev_timer); rb_call_super(1, &loop); return self; } /** * call-seq: * Coolio::TimerWatcher#detach -> Coolio::TimerWatcher * * Detach the timer watcher from its current Coolio::Loop. */ static VALUE Coolio_TimerWatcher_detach(VALUE self) { Watcher_Detach(timer, self); return self; } /** * call-seq: * Coolio::TimerWatcher#enable -> Coolio::TimerWatcher * * Re-enable a timer watcher which has been temporarily disabled. See the * disable method for a more thorough explanation. */ static VALUE Coolio_TimerWatcher_enable(VALUE self) { Watcher_Enable(timer, self); return self; } /** * call-seq: * Coolio::TimerWatcher#disable -> Coolio::TimerWatcher * * Temporarily disable a timer watcher which is attached to a loop. * This is useful if you wish to toggle event monitoring on and off. */ static VALUE Coolio_TimerWatcher_disable(VALUE self) { Watcher_Disable(timer, self); return self; } /** * call-seq: * Coolio::TimerWatcher#reset -> Coolio::TimerWatcher * * Reset the TimerWatcher. This behaves differently depending on whether it is repeating. * * If the timer is pending, its pending status is cleared. * * If the timer is attached but nonrepeating, stop it (as if it timed out). * * If the timer is repeating, reset it so it will fire again after its given interval. */ static VALUE Coolio_TimerWatcher_reset(VALUE self) { struct Coolio_Watcher *watcher_data; struct Coolio_Loop *loop_data; Data_Get_Struct(self, struct Coolio_Watcher, watcher_data); if(watcher_data->loop == Qnil) rb_raise(rb_eRuntimeError, "not attached to a loop"); Data_Get_Struct(watcher_data->loop, struct Coolio_Loop, loop_data); ev_timer_again(loop_data->ev_loop, &watcher_data->event_types.ev_timer); return self; } /** * call-seq: * Coolio::TimerWatcher#on_timer -> nil * * Called whenever the TimerWatcher fires */ static VALUE Coolio_TimerWatcher_on_timer(VALUE self) { return Qnil; } /* libev callback */ static void Coolio_TimerWatcher_libev_callback(struct ev_loop *ev_loop, struct ev_timer *timer, int revents) { Coolio_Loop_process_event((VALUE)timer->data, revents); } /* Coolio::Loop dispatch callback */ static void Coolio_TimerWatcher_dispatch_callback(VALUE self, int revents) { if(revents & EV_TIMEOUT) rb_funcall(self, rb_intern("on_timer"), 0); else rb_raise(rb_eRuntimeError, "unknown revents value for ev_timer: %d", revents); } cool.io-1.8.1/ext/cool.io/cool.io_ext.c0000644000004100000410000000074014632135713017651 0ustar www-datawww-data/* * Copyright (C) 2007-10 Tony Arcieri * You may redistribute this under the terms of the Ruby license.
* See LICENSE for details */ #include "ruby.h" #include "ev_wrap.h" #include "cool.io.h" static VALUE mCoolio = Qnil; /* Initialize the coolness */ void Init_cool() { /* Initializers for other modules */ Init_coolio_loop(); Init_coolio_watcher(); Init_coolio_iowatcher(); Init_coolio_timer_watcher(); Init_coolio_stat_watcher(); Init_coolio_utils(); }cool.io-1.8.1/ext/cool.io/ev_wrap.h0000644000004100000410000000037414632135713017102 0ustar www-datawww-data#define EV_STANDALONE /* keeps ev from requiring config.h */ #ifdef _WIN32 #define EV_SELECT_IS_WINSOCKET 1 /* configure libev for windows select */ #define EV_USE_MONOTONIC 0 #define EV_USE_REALTIME 0 #endif #include "../libev/ev.h" cool.io-1.8.1/ext/cool.io/extconf.rb0000644000004100000410000000370114632135713017256 0ustar www-datawww-datarequire 'mkmf' libs = [] $defs << "-DRUBY_VERSION_CODE=#{RUBY_VERSION.gsub(/\D/, '')}" have_func('rb_io_descriptor') have_func('rb_thread_blocking_region') have_func('rb_thread_call_without_gvl') have_func('rb_thread_alone') have_func('rb_str_set_len') have_library('rt', 'clock_gettime') if have_header('ruby/io.h') $defs << '-DHAVE_RUBY_IO_H' end if have_header('ruby/thread.h') $defs << '-DHAVE_RUBY_THREAD_H' end if have_header('sys/select.h') $defs << '-DEV_USE_SELECT' end if have_header('poll.h') $defs << '-DEV_USE_POLL' end if have_header('sys/epoll.h') $defs << '-DEV_USE_EPOLL' end if have_header('sys/event.h') and have_header('sys/queue.h') $defs << '-DEV_USE_KQUEUE' end if have_header('port.h') $defs << '-DEV_USE_PORT' end have_header('sys/resource.h') # ncpu detection specifics case RUBY_PLATFORM when /linux/ $defs << '-DHAVE_LINUX_PROCFS' else if have_func('sysctlbyname', ['sys/param.h', 'sys/sysctl.h']) $defs << '-DHAVE_SYSCTLBYNAME' end end if RUBY_PLATFORM =~ /solaris/ # libev/ev.c requires NSIG which is undefined if _XOPEN_SOURCE is defined $defs << '-D__EXTENSIONS__' end $LIBS << ' ' << libs.join(' ') dir_config('cool.io_ext') create_makefile('cool.io_ext') # win32 needs its libraries linked in "just the right order", or ioctlsocket will be mapped to an [inverted] Ruby-specific version. See the libev mailing list for a (not especially helpful) discussion; the true cause is unclear, but reordering the libraries overcomes the symptom. if RUBY_PLATFORM =~ /mingw|mswin/ makefile_contents = File.read 'Makefile' # The line below was once needed to avoid "Init_cool could not be found" when loading cool.io.so (it's unclear why). However, it caused "1114 A dynamic link library (DLL) initialization routine failed.", so it has been commented out. #makefile_contents.gsub! 'DLDFLAGS = ', 'DLDFLAGS = -export-all ' makefile_contents.gsub! /LIBS = (.*) (\S*ws2_32\S*)/i, 'LIBS = \\2 \\1' File.open('Makefile', 'w') { |f| f.write makefile_contents } end cool.io-1.8.1/.travis.yml0000644000004100000410000000023214632135713015226 0ustar www-datawww-datalanguage: ruby sudo: false rvm: - 2.4.10 - 2.5 - 2.6 - 2.7 - ruby-head - rbx matrix: allow_failures: - rvm: ruby-head - rvm: rbx cool.io-1.8.1/README.md0000644000004100000410000001240414632135713014400 0ustar www-datawww-dataCool.io ======= Cool.io is an event library for Ruby, built on the libev event library which provides a cross-platform interface to high performance system calls. This includes the epoll system call for Linux, the kqueue system call for BSDs and OS X, and the completion ports interface for Solaris. Cool.io also binds asynchronous wrappers to Ruby's core socket classes so you can use them in conjunction with Cool.io to build asynchronous event-driven applications.
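Cool.io ships as a standard RubyGem, so with Bundler a typical Gemfile entry is simply:

```ruby
# Gemfile (illustrative)
gem 'cool.io'
```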
You can include Cool.io in your programs with: ```ruby require 'cool.io' ``` Anatomy ------- Cool.io builds on two core classes which bind to the libev API: * Cool.io::Loop - This class represents an event loop which uses underlying high performance system calls to wait for events. * Cool.io::Watcher - This is the base class for event observers. Once you attach an event observer to a loop and start running it, you will begin receiving callbacks to particular methods when events occur. Watchers -------- There are presently four types of watchers: * Cool.io::IOWatcher - This class waits for an IO object to become readable, writable, or both. * Cool.io::TimerWatcher - This class waits for a specified duration then fires an event. You can also configure it to fire an event at specified intervals. * Cool.io::StatWatcher - Monitors files or directories for changes. * Cool.io::AsyncWatcher - Can be used to wake up a Cool.io::Loop running in a different thread. This allows each thread to run a separate Cool.io::Loop and for the different event loops to be able to signal each other. Using Watchers -------------- Watchers have five important methods: * attach(loop) - This binds a watcher to the specified event loop. If the watcher is already bound to a loop it will be detached first, then attached to the new one. * detach - This completely unbinds a watcher from an event loop. * disable - This stops the watcher from receiving events but does not unbind it from the loop. If you are trying to toggle a watcher on and off, it's best to use this method (and enable) as it performs better than completely removing the watcher from the event loop. * enable - This re-enables a watcher which has been disabled in the past. The watcher must still be bound to an event loop. * evloop - This returns the Cool.io::Loop object which the watcher is currently bound to. Asynchronous Wrappers --------------------- Several classes which provide asynchronous event-driven wrappers for Ruby's core socket classes are also provided. Among these are: * Cool.io::TCPSocket - A buffered wrapper to core Ruby's Socket class for use with TCP sockets. You can asynchronously create outgoing TCP connections using its Cool.io::TCPSocket.connect method. Cool.io::TCPSocket provides write buffering to ensure that writing never blocks, and has asynchronous callbacks for several events, including when the connection is opened (or failed), when data is received, when the write buffer has been written out completely, and when the connection closes. * Cool.io::TCPServer - A wrapper for TCPServer which creates new instances of Cool.io::TCPSocket (or any subclass you wish to provide) whenever an incoming connection is received. Example Program --------------- Cool.io provides a Sinatra-like DSL for authoring event-driven programs: ```ruby require 'cool.io' require 'cool.io/dsl' ADDR = '127.0.0.1' PORT = 4321 cool.io.connection :echo_server_connection do on_connect do puts "#{remote_addr}:#{remote_port} connected" end on_close do puts "#{remote_addr}:#{remote_port} disconnected" end on_read do |data| write data end end puts "Echo server listening on #{ADDR}:#{PORT}" cool.io.server ADDR, PORT, :echo_server_connection cool.io.run ``` This creates a new connection class called :echo_server_connection and defines a set of callbacks for when various events occur. We then create a new server on the given address and port. When this server receives new connections, it will create new instances of the given connection class for each connection.
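The client side can be written with the same DSL using cool.io.connect. The sketch below is a trimmed-down variant of examples/dslified_echo_client.rb from this gem, and assumes the echo server above is running on the same ADDR and PORT:

```ruby
# Adapted from examples/dslified_echo_client.rb; ADDR and PORT as defined above
cool.io.connect ADDR, PORT do
  on_connect do
    write "bounce this back to me"
  end

  on_read do |data|
    puts "Got: #{data}"
    close
  end
end

cool.io.run
```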
Finally, we kick everything off with cool.io.run. Calling cool.io.run will block, listening for events on our server. Using Cool.io subclasses directly --------------------------------- Below is an example of how to write an echo server using a subclass instead of the DSL: ```ruby require 'cool.io' HOST = 'localhost' PORT = 4321 class EchoServerConnection < Cool.io::TCPSocket def on_connect puts "#{remote_addr}:#{remote_port} connected" end def on_close puts "#{remote_addr}:#{remote_port} disconnected" end def on_read(data) write data end end server = Cool.io::TCPServer.new(HOST, PORT, EchoServerConnection) server.attach(Cool.io::Loop.default) puts "Echo server listening on #{HOST}:#{PORT}" Cool.io::Loop.default.run ``` Here a new observer type (EchoServerConnection) is made by subclassing an existing one and adding new implementations to existing event handlers. The default event loop is then obtained, and a new Cool.io::TCPServer (whose base class is Cool.io::Watcher) is created and attached to it. Once this is done, the event loop is started with Cool.io::Loop.default.run. This method will block until there are no active watchers for the loop or the loop is stopped explicitly with its stop method. cool.io-1.8.1/examples/0000755000004100000410000000000014632135713014736 5ustar www-datawww-datacool.io-1.8.1/examples/echo_server.rb0000644000004100000410000000104614632135713017570 0ustar www-datawww-data$LOAD_PATH.unshift File.expand_path('../../lib', __FILE__) require 'rubygems' require 'cool.io' ADDR = '127.0.0.1' PORT = 4321 class EchoServerConnection < Cool.io::TCPSocket def on_connect puts "#{remote_addr}:#{remote_port} connected" end def on_close puts "#{remote_addr}:#{remote_port} disconnected" end def on_read(data) write data end end event_loop = Cool.io::Loop.default Cool.io::TCPServer.new(ADDR, PORT, EchoServerConnection).attach(event_loop) puts "Echo server listening on #{ADDR}:#{PORT}" event_loop.run cool.io-1.8.1/examples/google.rb0000644000004100000410000000036714632135713016545 0ustar www-datawww-data$LOAD_PATH.unshift File.expand_path('../../lib', __FILE__) require 'rubygems' require 'cool.io' l = Coolio::Loop.default c = Coolio::HttpClient.connect("www.google.com", 80).attach(l) c.request('GET', '/search', :query => { :q => 'feces'}) l.runcool.io-1.8.1/examples/dslified_echo_server.rb0000644000004100000410000000063514632135713021436 0ustar www-datawww-data$LOAD_PATH.unshift File.expand_path('../../lib', __FILE__) require 'rubygems' require 'cool.io' ADDR = '127.0.0.1' PORT = 4321 cool.io.server ADDR, PORT do on_connect do puts "#{remote_addr}:#{remote_port} connected" end on_close do puts "#{remote_addr}:#{remote_port} disconnected" end on_read do |data| write data end end puts "Echo server listening on #{ADDR}:#{PORT}" cool.io.runcool.io-1.8.1/examples/echo_client.rb0000644000004100000410000000140414632135713017536 0ustar www-datawww-data$LOAD_PATH.unshift File.expand_path('../../lib', __FILE__) require 'rubygems' require 'cool.io' ADDR = '127.0.0.1' PORT = 4321 class ClientConnection < Cool.io::TCPSocket def on_connect puts "#{remote_addr}:#{remote_port} connected" write "bounce this back to me" end def on_close puts "#{remote_addr}:#{remote_port} disconnected" end def on_read(data) print "got #{data}" close end def on_resolve_failed print "DNS resolve failed" end def on_connect_failed print "connect failed, meaning our connection to their port was rejected" end end event_loop = Cool.io::Loop.default client = ClientConnection.connect(ADDR, PORT) client.attach(event_loop) puts "Echo
client connecting to #{ADDR}:#{PORT}..." event_loop.run cool.io-1.8.1/examples/dslified_echo_client.rb0000644000004100000410000000122614632135713021403 0ustar www-datawww-data$LOAD_PATH.unshift File.expand_path('../../lib', __FILE__) require 'rubygems' require 'cool.io' ADDR = '127.0.0.1' PORT = 4321 cool.io.connect ADDR, PORT do on_connect do puts "Connected to #{remote_host}:#{remote_port}" write "bounce this back to me" end on_close do puts "Disconnected from #{remote_host}:#{remote_port}" end on_read do |data| puts "Got: #{data}" close end on_resolve_failed do puts "Error: Couldn't resolve #{remote_host}" end on_connect_failed do puts "Error: Connection refused to #{remote_host}:#{remote_port}" end end puts "Echo client connecting to #{ADDR}:#{PORT}..." cool.io.run cool.io-1.8.1/examples/callbacked_echo_server.rb0000644000004100000410000000106214632135713021713 0ustar www-datawww-data$LOAD_PATH.unshift File.expand_path('../../lib', __FILE__) require 'rubygems' require 'cool.io' ADDR = '127.0.0.1' PORT = 4321 event_loop = Cool.io::Loop.default server = Cool.io::TCPServer.new(ADDR, PORT) do |connection| puts "#{connection.remote_addr}:#{connection.remote_port} connected" connection.on_close do puts "#{connection.remote_addr}:#{connection.remote_port} disconnected" end connection.on_read do |data| connection.write data end end server.attach(event_loop) puts "Echo server listening on #{ADDR}:#{PORT}" event_loop.run cool.io-1.8.1/appveyor.yml0000644000004100000410000000054514632135713015514 0ustar www-datawww-data--- install: - SET PATH=C:\Ruby%ruby_version%\bin;%PATH% - ruby --version - gem --version - bundle install build: off test_script: - bundle exec rake -rdevkit environment: matrix: - ruby_version: "200" - ruby_version: "200-x64" - ruby_version: "21" - ruby_version: "21-x64" - ruby_version: "22" - ruby_version: "22-x64"