rainbows-5.0.0/0000755000004100000410000000000012641135250013367 5ustar www-datawww-datarainbows-5.0.0/bin/0000755000004100000410000000000012641135250014137 5ustar www-datawww-datarainbows-5.0.0/bin/rainbows0000755000004100000410000000705512641135250015720 0ustar www-datawww-data#!/this/will/be/overwritten/or/wrapped/anyways/do/not/worry/ruby # -*- encoding: binary -*- require 'unicorn/launcher' require 'rainbows' require 'optparse' ENV["RACK_ENV"] ||= "development" rackup_opts = Unicorn::Configurator::RACKUP options = rackup_opts[:options] op = OptionParser.new("", 24, ' ') do |opts| cmd = File.basename($0) opts.banner = "Usage: #{cmd} " \ "[ruby options] [#{cmd} options] [rackup config file]" opts.separator "Ruby options:" lineno = 1 opts.on("-e", "--eval LINE", "evaluate a LINE of code") do |line| eval line, TOPLEVEL_BINDING, "-e", lineno lineno += 1 end opts.on("-d", "--debug", "set debugging flags (set $DEBUG to true)") do $DEBUG = true end opts.on("-w", "--warn", "turn warnings on for your script") do $-w = true end opts.on("-I", "--include PATH", "specify $LOAD_PATH (may be used more than once)") do |path| $LOAD_PATH.unshift(*path.split(/:/)) end opts.on("-r", "--require LIBRARY", "require the library, before executing your script") do |library| require library end opts.separator "#{cmd} options:" # some of these switches exist for rackup command-line compatibility, opts.on("-o", "--host HOST", "listen on HOST (default: #{Unicorn::Const::DEFAULT_HOST})") do |h| rackup_opts[:host] = h rackup_opts[:set_listener] = true end opts.on("-p", "--port PORT", "use PORT (default: #{Unicorn::Const::DEFAULT_PORT})") do |p| rackup_opts[:port] = p.to_i rackup_opts[:set_listener] = true end opts.on("-E", "--env RACK_ENV", "use RACK_ENV for defaults (default: development)") do |e| ENV["RACK_ENV"] = e end opts.on("-N", "--no-default-middleware", "do not load middleware implied by RACK_ENV") do |e| rackup_opts[:no_default_middleware] = true end opts.on("-D", "--daemonize", "run daemonized in the background") do |d| rackup_opts[:daemonize] = !!d end opts.on("-P", "--pid FILE", "DEPRECATED") do |f| warn "Use of --pid/-P is strongly discouraged" warn "Use the 'pid' directive in the Rainbows!/Unicorn config file instead" options[:pid] = f end opts.on("-s", "--server SERVER", "this flag only exists for compatibility") do |s| warn "-s/--server only exists for compatibility with rackup" end # Rainbows!/Unicorn-specific stuff opts.on("-l", "--listen {HOST:PORT|PATH}", "listen on HOST:PORT or PATH", "this may be specified multiple times", "(default: #{Unicorn::Const::DEFAULT_LISTEN})") do |address| options[:listeners] << address end opts.on("-c", "--config-file FILE", "Rainbows!-specific config file") do |f| options[:config_file] = f end # I'm avoiding Unicorn-specific config options on the command-line. # IMNSHO, config options on the command-line are redundant given # config files and make things unnecessarily complicated with multiple # places to look for a config option. opts.separator "Common options:" opts.on_tail("-h", "--help", "Show this message") do puts opts.to_s.gsub(/^.*DEPRECATED.*$/s, '') exit end opts.on_tail("-v", "--version", "Show version") do puts "Rainbows! v#{Rainbows::Const::RAINBOWS_VERSION}" exit end opts.parse! 
ARGV end app = Unicorn.builder(ARGV[0] || 'config.ru', op) op = nil if $DEBUG require 'pp' pp({ unicorn_options: options, app: app, daemonize: rackup_opts[:daemonize], }) end Unicorn::Launcher.daemonize!(options) if rackup_opts[:daemonize] Rainbows::HttpServer.new(app, options).start.join rainbows-5.0.0/.gitattributes0000644000004100000410000000012512641135250016260 0ustar www-datawww-data*.gemspec diff=ruby *.rb diff=ruby *.ru diff=ruby Rakefile diff=ruby bin/* diff=ruby rainbows-5.0.0/examples/0000755000004100000410000000000012641135250015205 5ustar www-datawww-datarainbows-5.0.0/examples/reverse_proxy.ru0000644000004100000410000000031212641135250020465 0ustar www-datawww-data# see Rainbows::ReverseProxy RDoc cfg = { :upstreams => [ "/tmp/.r.sock", "http://bogomips.org/", [ "http://10.6.6.6:666/", { :weight => 666 } ], ] } run Rainbows::ReverseProxy.new(cfg) rainbows-5.0.0/.olddoc.yml0000644000004100000410000000070112641135250015432 0ustar www-datawww-data--- cgit_url: http://bogomips.org/rainbows.git git_url: git://bogomips.org/rainbows.git rdoc_url: http://rainbows.bogomips.org/ merge_html: rainbows_1: Documentation/rainbows.1.html Summary: Documentation/comparison.html public_email: rainbows-public@bogomips.org private_email: rainbows@bogomips.org noindex: - TODO - LATEST - Unicorn - Unicorn::Configurator - Unicorn::SocketHelper - EM - Revactor ml_url: http://bogomips.org/rainbows-public/ rainbows-5.0.0/t/0000755000004100000410000000000012641135250013632 5ustar www-datawww-datarainbows-5.0.0/t/t0050-response-body-close-has-env.sh0000644000004100000410000000376312641135250022200 0ustar www-datawww-data#!/bin/sh . ./test-lib.sh skip_models StreamResponseEpoll t_plan 29 "keepalive does not clear Rack env prematurely for $model" t_begin "setup and start" && { rainbows_setup rtmpfiles curl_out curl_err echo "preload_app true" >> $unicorn_config rainbows -D close-has-env.ru -c $unicorn_config rainbows_wait_start } req_pipelined () { pfx=$1 t_begin "make pipelined requests to trigger $pfx response body" && { > $r_out rm -f $ok ( cat $fifo > $tmp & printf 'GET /%s/1 HTTP/1.1\r\n' $pfx printf 'Host: example.com\r\n\r\n' printf 'GET /%s/2 HTTP/1.1\r\n' $pfx printf 'Host: example.com\r\n\r\n' printf 'GET /%s/3 HTTP/1.1\r\n' $pfx printf 'Host: example.com\r\n' printf 'Connection: close\r\n\r\n' wait echo ok > $ok ) | socat - TCP4:$listen > $fifo test xok = x$(cat $ok) } } reload () { t_begin 'reloading Rainbows! to ensure writeout' && { # ensure worker is loaded before HUP rm -f $curl_err $curl_out curl -vs http://$listen/ >$curl_out 2> $curl_err # reload to ensure everything is flushed kill -HUP $rainbows_pid test xSTART = x"$(cat $fifo)" } } check_log () { pfx="$1" t_begin "check body close messages" && { < $r_out awk ' /^path_info=\/'$pfx'\/[1-3]$/ { next } { exit(2) } END { exit(NR == 3 ? 
0 : 1) } ' } } req_keepalive () { pfx="$1" t_begin "make keepalive requests to trigger $pfx response body" && { > $r_out rm -f $curl_err $curl_out curl -vsSf http://$listen/$pfx/[1-3] 2> $curl_err > $curl_out } } req_keepalive file reload check_log file req_pipelined file reload check_log file req_keepalive blob reload check_log blob req_pipelined blob reload check_log blob req_keepalive pipe reload check_log pipe req_pipelined pipe reload check_log pipe t_begin "enable sendfile gem" && { echo "require 'sendfile'" >> $unicorn_config } reload req_keepalive file reload check_log file req_pipelined file reload check_log file t_begin "killing succeeds" && { kill $rainbows_pid } t_begin "check stderr" && { check_stderr } t_done rainbows-5.0.0/t/bin/0000755000004100000410000000000012641135250014402 5ustar www-datawww-datarainbows-5.0.0/t/bin/sha1sum.rb0000755000004100000410000000133612641135250016316 0ustar www-datawww-data#!/usr/bin/env ruby # -*- encoding: binary -*- # Reads from stdin and outputs the SHA1 hex digest of the input this is # ONLY used as a last resort, our test code will try to use sha1sum(1), # openssl(1), or gsha1sum(1) before falling back to using this. We try # all options first because we have a strong and healthy distrust of our # Ruby abilities in general, and *especially* when it comes to # understanding (and trusting the implementation of) Ruby 1.9 encoding. require 'digest/sha1' $stdout.sync = $stderr.sync = true $stdout.binmode $stdin.binmode bs = 16384 digest = Digest::SHA1.new if buf = $stdin.read(bs) begin digest.update(buf) end while $stdin.read(bs, buf) end $stdout.syswrite("#{digest.hexdigest}\n") rainbows-5.0.0/t/bin/unused_listen0000755000004100000410000000233612641135250017215 0ustar www-datawww-data#!/usr/bin/env ruby # -*- encoding: binary -*- # this is to remain compatible with the unused_port function in the # Unicorn test/test_helper.rb file require 'socket' require 'tmpdir' default_port = 8080 addr = ENV['UNICORN_TEST_ADDR'] || '127.0.0.1' retries = 100 base = 5000 port = sock = lock_path = nil begin begin port = base + rand(32768 - base) while port == default_port port = base + rand(32768 - base) end sock = Socket.new(Socket::AF_INET, Socket::SOCK_STREAM, 0) sock.bind(Socket.pack_sockaddr_in(port, addr)) sock.listen(5) rescue Errno::EADDRINUSE, Errno::EACCES sock.close rescue nil retry if (retries -= 1) >= 0 end # since we'll end up closing the random port we just got, there's a race # condition could allow the random port we just chose to reselect itself # when running tests in parallel with gmake. Create a lock file while # we have the port here to ensure that does not happen. lock_path = "#{Dir::tmpdir}/unicorn_test.#{addr}:#{port}.lock" lock = File.open(lock_path, File::WRONLY|File::CREAT|File::EXCL, 0600) rescue Errno::EEXIST sock.close rescue nil retry end sock.close rescue nil puts %Q(listen=#{addr}:#{port} T_RM_LIST="$T_RM_LIST #{lock_path}") rainbows-5.0.0/t/bin/content-md5-put0000755000004100000410000000173612641135250017302 0ustar www-datawww-data#!/usr/bin/env ruby # -*- encoding: binary -*- # simple chunked HTTP PUT request generator (and just that), # it reads stdin and writes to stdout so socat can write to a # UNIX or TCP socket (or to another filter or file) along with # a Content-MD5 trailer. require 'digest/md5' $stdout.sync = $stderr.sync = true $stdout.binmode $stdin.binmode bs = ENV['bs'] ? ENV['bs'].to_i : 4096 if ARGV.grep("--no-headers").empty? 
$stdout.write( "PUT / HTTP/1.1\r\n" \ "Host: example.com\r\n" \ "Connection: #{ENV["Connection"] || 'close'}\r\n" \ "Transfer-Encoding: chunked\r\n" \ "Trailer: Content-MD5\r\n" \ "\r\n" ) end digest = Digest::MD5.new if buf = $stdin.readpartial(bs) begin digest.update(buf) $stdout.write("%x\r\n" % [ buf.size ]) $stdout.write(buf) $stdout.write("\r\n") end while $stdin.read(bs, buf) end digest = [ digest.digest ].pack('m').strip $stdout.write("0\r\n") $stdout.write("Content-MD5: #{digest}\r\n\r\n") rainbows-5.0.0/t/async-response.ru0000644000004100000410000000034312641135250017153 0ustar www-datawww-datause Rack::Chunked use Rainbows::DevFdResponse run lambda { |env| io = IO.popen('for i in 0 1 2 3 4 5 6 7 8 9; do date; sleep 1; done', 'rb') [ 200, { 'Content-Type' => 'text/plain', }, io ].freeze } rainbows-5.0.0/t/t0500-cramp-streaming.sh0000755000004100000410000000335712641135250020040 0ustar www-datawww-data#!/bin/sh . ./test-lib.sh case $model in disabled) ;; *) t_info "skipping $T since it's not compatible with $model" exit 0 ;; esac RUBYLIB=$($RUBY test_isolate_cramp.rb):$RUBYLIB export RUBYLIB require_check cramp Cramp::VERSION t_plan 7 "streaming test for Cramp" CONFIG_RU=cramp/streaming.ru t_begin "setup and start" && { rainbows_setup rtmpfiles a b c curl_err expect # requiring Rubygems for this test only since Cramp depends on # pre versions of several gems # Like the rest of the EM/async stuff, it's not Rack::Lint compatible rainbows -E deployment -D $CONFIG_RU -c $unicorn_config rainbows_wait_start } # this will spew any unexpected input to stdout and be silent on success check () { ( i=0 while read hello world do t1=$(unix_time) diff=$(($t1 - $t0)) t_info "i=$i diff=$diff hello=$hello world=$world" test $diff -ge 1 || echo "$i: diff: $diff < 1 second" t0=$t1 test xHello = x"$hello" || echo "$i: Hello != $hello" test xWorld = x"$world" || echo "$i: World != $world" i=$(($i + 1)) test $i -le 3 || echo "$i: $i > 3" done ) } t_begin "send async requests off in parallel" && { t0=$(unix_time) curl --no-buffer -sSf http://$listen/ 2>> $curl_err | check >$a 2>&1 & curl --no-buffer -sSf http://$listen/ 2>> $curl_err | check >$b 2>&1 & curl --no-buffer -sSf http://$listen/ 2>> $curl_err | check >$c 2>&1 & } t_begin "wait for curl terminations" && { wait t1=$(unix_time) elapsed=$(( $t1 - $t0 )) t_info "elapsed=$elapsed (should be 4-5s)" } t_begin "termination signal sent" && { kill $rainbows_pid } t_begin "no errors from curl" && { test ! -s $curl_err } t_begin "no errors in stderr" && check_stderr t_begin "silence is golden" && { test ! -s $a test ! -s $b test ! -s $c } t_done rainbows-5.0.0/t/t9100.ru0000644000004100000410000000034212641135250014756 0ustar www-datawww-datause Rack::ContentLength use Rack::ContentType, 'text/plain' use Rainbows::ThreadTimeout, :timeout => 1 run lambda { |env| if env["PATH_INFO"] =~ %r{/([\d\.]+)\z} Rainbows.sleep($1.to_f) end [ 200, [], [ "HI\n" ] ] } rainbows-5.0.0/t/t0113.ru0000644000004100000410000000033312641135250014751 0ustar www-datawww-data#\ -E none use Rack::ContentLength use Rack::ContentType, 'text/plain' app = lambda do |env| case env['rack.input'] when Unicorn::StreamInput [ 200, {}, %w(OK) ] else [ 500, {}, %w(NO) ] end end run app rainbows-5.0.0/t/t0001-unix-http.sh0000755000004100000410000000456712641135250016707 0ustar www-datawww-data#!/bin/sh . 
./test-lib.sh skip_models StreamResponseEpoll t_plan 19 "simple HTTP connection keepalive/pipelining tests for $model" t_begin "checking for config.ru for $model" && { tbase=simple-http_$model.ru test -f "$tbase" } t_begin "setup and start" && { rtmpfiles unix_socket rainbows_setup echo "listen '$unix_socket'" >> $unicorn_config rainbows -D $tbase -c $unicorn_config rainbows_wait_start } t_begin "pid file exists" && { test -f $pid } t_begin "single TCP request" && { curl -sSfv http://$listen/ } t_begin "handles client EOF gracefully" && { printf 'GET / HTTP/1.1\r\nHost: example.com\r\n\r\n' | \ socat - UNIX:$unix_socket > $tmp dbgcat tmp if grep 'HTTP.* 500' $tmp then die "500 error returned on client shutdown(SHUT_WR)" fi check_stderr } dbgcat r_err t_begin "pipelining partial requests" && { req='GET / HTTP/1.1\r\nHost: example.com\r\n' ( cat $fifo > $tmp & printf "$req"'\r\n'"$req" sleep 1 printf 'Connection: close\r\n\r\n' wait echo ok > $ok ) | socat - UNIX:$unix_socket > $fifo } dbgcat tmp t_begin "two HTTP/1.1 responses" && { test 2 -eq $(grep '^HTTP/1.1' $tmp | count_lines) } t_begin "two HTTP/1.1 200 OK responses" && { test 2 -eq $(grep '^HTTP/1.1 200 OK' $tmp | count_lines) } t_begin 'one "Connection: keep-alive" response' && { test 1 -eq $(grep '^Connection: keep-alive' $tmp | count_lines) } t_begin 'one "Connection: close" response' && { test 1 -eq $(grep '^Connection: close' $tmp | count_lines) } t_begin 'check subshell success' && { test x"$(cat $ok)" = xok } t_begin "check stderr" && { check_stderr } t_begin "burst pipelining requests" && { req='GET / HTTP/1.1\r\nHost: example.com\r\n' ( cat $fifo > $tmp & printf "$req"'\r\n'"$req"'Connection: close\r\n\r\n' wait echo ok > $ok ) | socat - UNIX:$unix_socket > $fifo } dbgcat tmp dbgcat r_err t_begin "two HTTP/1.1 responses" && { test 2 -eq $(grep '^HTTP/1.1' $tmp | count_lines) } t_begin "two HTTP/1.1 200 OK responses" && { test 2 -eq $(grep '^HTTP/1.1 200 OK' $tmp | count_lines) } t_begin 'one "Connection: keep-alive" response' && { test 1 -eq $(grep '^Connection: keep-alive' $tmp | count_lines) } t_begin 'one "Connection: close" response' && { test 1 -eq $(grep '^Connection: close' $tmp | count_lines) } t_begin 'check subshell success' && { test x"$(cat $ok)" = xok } t_begin "killing succeeds" && { kill $rainbows_pid } t_done rainbows-5.0.0/t/fork-sleep.ru0000644000004100000410000000045612641135250016256 0ustar www-datawww-data# we do not want Rack::Lint or anything to protect us use Rack::ContentLength use Rack::ContentType, "text/plain" trap(:CHLD) { $stderr.puts Process.waitpid2(-1).inspect } map "/" do time = ENV["nr"] || '15' pid = fork { exec('sleep', time) } run lambda { |env| [ 200, {}, [ "#{pid}\n" ] ] } end rainbows-5.0.0/t/simple-http_Revactor.ru0000644000004100000410000000036512641135250020321 0ustar www-datawww-datause Rack::ContentLength use Rack::ContentType run lambda { |env| if env['rack.multithread'] == false && env['rainbows.model'] == :Revactor [ 200, {}, [ Thread.current.inspect << "\n" ] ] else raise "rack.multithread is true" end } rainbows-5.0.0/t/simple-http_ThreadPool.ru0000644000004100000410000000036212641135250020572 0ustar www-datawww-datause Rack::ContentLength use Rack::ContentType run lambda { |env| if env['rack.multithread'] && env['rainbows.model'] == :ThreadPool [ 200, {}, [ Thread.current.inspect << "\n" ] ] else raise "rack.multithread is not true" end } rainbows-5.0.0/t/simple-http_WriterThreadSpawn.ru0000644000004100000410000000036612641135250022152 0ustar www-datawww-datause 
Rack::ContentLength use Rack::ContentType run lambda { |env| if env['rack.multithread'] && env['rainbows.model'] == :WriterThreadSpawn [ 200, {}, [ Thread.current.inspect << "\n" ] ] else raise "rack.multithread is false" end } rainbows-5.0.0/t/t0200-async-response.sh0000755000004100000410000000260512641135250017710 0ustar www-datawww-data#!/bin/sh CONFIG_RU=${CONFIG_RU-'async-response.ru'} . ./test-lib.sh skip_models Base WriterThreadPool WriterThreadSpawn skip_models StreamResponseEpoll case $CONFIG_RU in *no-autochunk.ru) t_plan 7 "async response w/o autochunk for $model" skip_autochunk=true ;; *) t_plan 6 "async response for $model" skip_autochunk=false ;; esac t_begin "setup and start" && { rainbows_setup rtmpfiles a b c curl_err # can't load Rack::Lint here since it'll cause Rev to slurp rainbows -E none -D $CONFIG_RU -c $unicorn_config rainbows_wait_start } t_begin "send async requests off in parallel" && { t0=$(unix_time) ( curl --no-buffer -sSf http://$listen/ 2>> $curl_err | tee $a) & ( curl --no-buffer -sSf http://$listen/ 2>> $curl_err | tee $b) & ( curl --no-buffer -sSf http://$listen/ 2>> $curl_err | tee $c) & wait t1=$(unix_time) } t_begin "ensure elapsed requests were processed in parallel" && { elapsed=$(( $t1 - $t0 )) echo "elapsed=$elapsed < 30" test $elapsed -lt 30 } t_begin "termination signal sent" && { kill $rainbows_pid } dbgcat a dbgcat b dbgcat c dbgcat r_err dbgcat curl_err t_begin "no errors from curl" && { test ! -s $curl_err } t_begin "no errors in stderr" && check_stderr dbgcat r_err if $skip_autochunk then t_begin "no responses are chunked" && { test x"$(cat $a)" = x0123456789 test x"$(cat $b)" = x0123456789 test x"$(cat $c)" = x0123456789 } fi t_done rainbows-5.0.0/t/t0016-onenine-encoding-is-tricky.sh0000755000004100000410000000104512641135250022074 0ustar www-datawww-data#!/bin/sh . ./test-lib.sh t_plan 4 "proper handling of onenine encoding for $model" t_begin "setup and startup" && { rainbows_setup $model rainbows -E none -D ./t0016.rb -c $unicorn_config rainbows_wait_start expect_sha1=8ff79d8115f9fe38d18be858c66aa08a1cc27a66 } t_begin "response matches expected" && { rm -f $ok ( curl -sSf http://$listen/ && echo ok > $ok ) | rsha1 > $tmp test x$expect_sha1 = x"$(cat $tmp)" } t_begin "shutdown server" && { kill -QUIT $rainbows_pid } dbgcat r_err t_begin "check stderr" && check_stderr t_done rainbows-5.0.0/t/simple-http_NeverBlock.ru0000644000004100000410000000040712641135250020563 0ustar www-datawww-datause Rack::ContentLength use Rack::ContentType run lambda { |env| if env['rack.multithread'] == false && EM.reactor_running? && env['rainbows.model'] == :NeverBlock [ 200, {}, [ Thread.current.inspect << "\n" ] ] else raise env.inspect end } rainbows-5.0.0/t/t0005-large-file-response.sh0000755000004100000410000000373412641135250020611 0ustar www-datawww-data#!/bin/sh . ./test-lib.sh skip_models StreamResponseEpoll test -r random_blob || die "random_blob required, run with 'make $0'" if ! 
grep -v ^VmRSS: /proc/self/status >/dev/null 2>&1 then t_info "skipping, can't read RSS from /proc/self/status" exit 0 fi t_plan 10 "large file response slurp avoidance for $model" t_begin "setup and startup" && { rtmpfiles curl_out rainbows_setup $model 1 # can't load Rack::Lint here since it'll cause Rev to slurp rainbows -E none -D large-file-response.ru -c $unicorn_config rainbows_wait_start } t_begin "read random blob sha1 and size" && { random_blob_sha1=$(rsha1 < random_blob) random_blob_size=$(count_bytes < random_blob) } t_begin "read current RSS" && { curl -v http://$listen/rss dbgcat r_err rss_before=$(curl -sSfv http://$listen/rss) t_info "rss_before=$rss_before" } t_begin "send a series HTTP/1.1 requests sequentially" && { for i in a b c do sha1=$( (curl -sSfv http://$listen/random_blob && echo ok >$ok) | rsha1) test $sha1 = $random_blob_sha1 test xok = x$(cat $ok) done } # this was a problem during development t_begin "HTTP/1.0 test" && { sha1=$( (curl -0 -sSfv http://$listen/random_blob && echo ok >$ok) | rsha1) test $sha1 = $random_blob_sha1 test xok = x$(cat $ok) } t_begin "HTTP/0.9 test" && { ( printf 'GET /random_blob\r\n' rsha1 < $fifo > $tmp & wait echo ok > $ok ) | socat - TCP:$listen > $fifo test $(cat $tmp) = $random_blob_sha1 test xok = x$(cat $ok) } dbgcat r_err t_begin "read RSS again" && { curl -v http://$listen/rss rss_after=$(curl -sSfv http://$listen/rss) t_info "rss_after=$rss_after" } t_begin "shutdown server" && { kill -QUIT $rainbows_pid } t_begin "compare RSS before and after" && { diff=$(( $rss_after - $rss_before )) # default GC malloc limit in MRI: fudge=$(( 8 * 1024 * 1024 )) t_info "test diff=$diff < orig=$random_blob_size" test $diff -le $(( $random_blob_size + $fudge )) } dbgcat r_err t_begin "check stderr" && check_stderr t_done rainbows-5.0.0/t/t0020-large-sendfile-response.sh0000755000004100000410000000616412641135250021460 0ustar www-datawww-data#!/bin/sh . 
./test-lib.sh skip_models StreamResponseEpoll test -r random_blob || die "random_blob required, run with 'make $0'" case $RUBY_ENGINE in ruby) ;; *) t_info "skipping $T since it can't load the sendfile gem, yet" exit 0 ;; esac t_plan 12 "large sendfile response for $model" t_begin "setup and startup" && { rtmpfiles curl_out a b c slow_a slow_b rainbows_setup $model echo 'require "sendfile"' >> $unicorn_config echo 'def (::IO).copy_stream(*x); abort "NO"; end' >> $unicorn_config # can't load Rack::Lint here since it clobbers body#to_path rainbows -E none -D large-file-response.ru -c $unicorn_config rainbows_wait_start } t_begin "read random blob sha1" && { random_blob_sha1=$(rsha1 < random_blob) three_sha1=$(cat random_blob random_blob random_blob | rsha1) } t_begin "send keepalive HTTP/1.1 requests in parallel" && { for i in $a $b $c $slow_a $slow_b do curl -sSf http://$listen/random_blob \ http://$listen/random_blob \ http://$listen/random_blob | rsha1 > $i & done wait for i in $a $b $c $slow_a $slow_b do test x$(cat $i) = x$three_sha1 done } t_begin "send a batch of abortive HTTP/1.1 requests in parallel" && { for i in $a $b $c $slow_a $slow_b do rm -f $i ( curl -sSf --max-time 2 --limit-rate 1K \ http://$listen/random_blob >/dev/null || echo ok > $i ) & done wait } t_begin "all requests timed out" && { for i in $a $b $c $slow_a $slow_b do test x$(cat $i) = xok done } s='$NF ~ /worker_connections=[0-9]+/{gsub(/[^0-9]/,"",$3); print $3; exit}' t_begin "check proc to ensure file is closed properly (Linux only)" && { worker_pid=$(awk "$s" < $r_err) test -n "$worker_pid" if test -d /proc/$worker_pid/fd then if ls -l /proc/$worker_pid/fd | grep random_blob then t_info "random_blob file is open ($model)" fi else t_info "/proc/$worker_pid/fd not found" fi } t_begin "send a bunch of HTTP/1.1 requests in parallel" && { ( curl -sSf --limit-rate 5M http://$listen/random_blob | \ rsha1 > $slow_a ) & ( curl -sSf --limit-rate 6M http://$listen/random_blob | \ rsha1 > $slow_b ) & for i in $a $b $c do ( curl -sSf http://$listen/random_blob | rsha1 > $i ) & done wait for i in $a $b $c $slow_a $slow_b do test x$(cat $i) = x$random_blob_sha1 done } # this was a problem during development t_begin "HTTP/1.0 test" && { sha1=$( (curl -0 -sSf http://$listen/random_blob && echo ok >$ok) | rsha1) test $sha1 = $random_blob_sha1 test xok = x$(cat $ok) } t_begin "HTTP/0.9 test" && { ( printf 'GET /random_blob\r\n' rsha1 < $fifo > $tmp & wait echo ok > $ok ) | socat - TCP:$listen > $fifo test $(cat $tmp) = $random_blob_sha1 test xok = x$(cat $ok) } t_begin "check proc to ensure file is closed properly (Linux only)" && { worker_pid=$(awk "$s" < $r_err) test -n "$worker_pid" if test -d /proc/$worker_pid/fd then if ls -l /proc/$worker_pid/fd | grep random_blob then t_info "random_blob file is open ($model)" fi else t_info "/proc/$worker_pid/fd not found" fi } t_begin "shutdown server" && { kill -QUIT $rainbows_pid } dbgcat r_err t_begin "check stderr" && check_stderr t_done rainbows-5.0.0/t/simple-http_RevThreadSpawn.ru0000644000004100000410000000035012641135250021423 0ustar www-datawww-datause Rack::ContentLength use Rack::ContentType run lambda { |env| if env['rack.multithread'] && env['rainbows.model'] == :RevThreadSpawn [ 200, {}, [ env.inspect << "\n" ] ] else raise "rack.multithread is false" end } rainbows-5.0.0/t/test_isolate_cramp.rb0000644000004100000410000000110312641135250020033 0ustar www-datawww-datarequire 'rubygems' require 'isolate' engine = defined?(RUBY_ENGINE) ? 
RUBY_ENGINE : 'ruby' path = "tmp/isolate/#{engine}-#{RUBY_VERSION}/cramp" opts = { :system => false, # we want "ruby-1.8.7" and not "ruby-1.8", so disable multiruby :multiruby => false, :path => path, } old_out = $stdout.dup $stdout.reopen($stderr) lock = File.open(__FILE__, "rb") lock.flock(File::LOCK_EX) Isolate.now!(opts) do if engine == "ruby" gem 'cramp', '0.15' end end $stdout.reopen(old_out) dirs = Dir["#{path}/gems/*-*/lib"] puts dirs.map { |x| File.expand_path(x) }.join(':') rainbows-5.0.0/t/simple-http_RevThreadPool.ru0000644000004100000410000000034712641135250021252 0ustar www-datawww-datause Rack::ContentLength use Rack::ContentType run lambda { |env| if env['rack.multithread'] && env['rainbows.model'] == :RevThreadPool [ 200, {}, [ env.inspect << "\n" ] ] else raise "rack.multithread is false" end } rainbows-5.0.0/t/hijack.ru0000644000004100000410000000240312641135250015432 0ustar www-datawww-datause Rack::Lint use Rack::ContentLength use Rack::ContentType, "text/plain" class DieIfUsed def each abort "body.each called after response hijack\n" end def close abort "body.close called after response hijack\n" end end def lazy_close(io) thr = Thread.new do # wait and see if Rainbows! accidentally closes us sleep((ENV["DELAY"] || 10).to_i) begin io.close rescue => e warn "E: #{e.message} (#{e.class})" exit!(3) end end at_exit { thr.join } end run lambda { |env| case env["PATH_INFO"] when "/hijack_req" if env["rack.hijack?"] io = env["rack.hijack"].call if io.respond_to?(:read_nonblock) && env["rack.hijack_io"].respond_to?(:read_nonblock) # exercise both, since we Rack::Lint may use different objects env["rack.hijack_io"].write("HTTP/1.0 200 OK\r\n\r\n") io.write("request.hijacked") lazy_close(io) return [ 500, {}, DieIfUsed.new ] end end [ 500, {}, [ "hijack BAD\n" ] ] when "/hijack_res" r = "response.hijacked" [ 200, { "Content-Length" => r.bytesize.to_s, "rack.hijack" => proc do |io| io.write(r) lazy_close(io) end }, DieIfUsed.new ] end } rainbows-5.0.0/t/async_examples/0000755000004100000410000000000012641135250016645 5ustar www-datawww-datarainbows-5.0.0/t/async_examples/README0000644000004100000410000000025312641135250017525 0ustar www-datawww-dataThese examples in this directory are stolen from Thin 1.2.5 with only trivial changes. All examples in this directory retain their original license (MIT) and copyrights. rainbows-5.0.0/t/async_examples/async_app.ru0000644000004100000410000000731712641135250021202 0ustar www-datawww-data#!/usr/bin/env rackup -s thin # # async_app.ru # raggi/thin # # A second demo app for async rack + thin app processing! # Now using http status code 100 instead. # # Created by James Tucker on 2008-06-17. # Copyright 2008 James Tucker . 
# #-- # Benchmark Results: # # raggi@mbk:~$ ab -c 100 -n 500 http://127.0.0.1:3000/ # This is ApacheBench, Version 2.0.40-dev <$Revision: 1.146 $> apache-2.0 # Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/ # Copyright 2006 The Apache Software Foundation, http://www.apache.org/ # # Benchmarking 127.0.0.1 (be patient) # Completed 100 requests # Completed 200 requests # Completed 300 requests # Completed 400 requests # Finished 500 requests # # # Server Software: thin # Server Hostname: 127.0.0.1 # Server Port: 3000 # # Document Path: / # Document Length: 12 bytes # # Concurrency Level: 100 # Time taken for tests: 5.263089 seconds # Complete requests: 500 # Failed requests: 0 # Write errors: 0 # Total transferred: 47000 bytes # HTML transferred: 6000 bytes # Requests per second: 95.00 [#/sec] (mean) # Time per request: 1052.618 [ms] (mean) # Time per request: 10.526 [ms] (mean, across all concurrent requests) # Transfer rate: 8.55 [Kbytes/sec] received # # Connection Times (ms) # min mean[+/-sd] median max # Connect: 0 3 2.2 3 8 # Processing: 1042 1046 3.1 1046 1053 # Waiting: 1037 1042 3.6 1041 1050 # Total: 1045 1049 3.1 1049 1057 # # Percentage of the requests served within a certain time (ms) # 50% 1049 # 66% 1051 # 75% 1053 # 80% 1053 # 90% 1054 # 95% 1054 # 98% 1056 # 99% 1057 # 100% 1057 (longest request) class DeferrableBody include EventMachine::Deferrable def call(body) body.each do |chunk| @body_callback.call(chunk) end end def each &blk @body_callback = blk end end class AsyncApp # This is a template async response. N.B. Can't use string for body on 1.9 AsyncResponse = [-1, {}, []].freeze def call(env) body = DeferrableBody.new # Get the headers out there asap, let the client know we're alive... EventMachine::next_tick { env['async.callback'].call [200, {'Content-Type' => 'text/plain'}, body] } # Semi-emulate a long db request, instead of a timer, in reality we'd be # waiting for the response data. Whilst this happens, other connections # can be serviced. # This could be any callback based thing though, a deferrable waiting on # IO data, a db request, an http request, an smtp send, whatever. EventMachine::add_timer(1) { body.call ["Woah, async!\n"] EventMachine::next_tick { # This could actually happen any time, you could spawn off to new # threads, pause as a good looking lady walks by, whatever. # Just shows off how we can defer chunks of data in the body, you can # even call this many times. body.call ["Cheers then!"] body.succeed } } # throw :async # Still works for supporting non-async frameworks... AsyncResponse # May end up in Rack :-) end end # The additions to env for async.connection and async.callback absolutely # destroy the speed of the request if Lint is doing it's checks on env. # It is also important to note that an async response will not pass through # any further middleware, as the async response notification has been passed # right up to the webserver, and the callback goes directly there too. # Middleware could possibly catch :async, and also provide a different # async.connection and async.callback. # use Rack::Lint run AsyncApp.new rainbows-5.0.0/t/async_examples/async_tailer.ru0000644000004100000410000000372212641135250021676 0ustar www-datawww-data#!/usr/bin/env rackup -s thin # # async_tailer.ru # raggi/thin # # Tested with 150 spawned tails on OS X # # Created by James Tucker on 2008-06-18. # Copyright 2008 James Tucker . # Uncomment if appropriate for you.. 
# EM.epoll # EM.kqueue tail_log_file = ENV["TAIL_LOG_FILE"] or abort "TAIL_LOG_FILE= env must be set" unless ::File.file?(tail_log_file) && ::File.readable?(tail_log_file) abort "#{tail_log_file} must be a readable regular file" end class DeferrableBody include EventMachine::Deferrable def initialize @queue = [] # make sure to flush out the queue before closing the connection callback{ until @queue.empty? @queue.shift.each{|chunk| @body_callback.call(chunk) } end } end def schedule_dequeue return unless @body_callback EventMachine::next_tick do next unless body = @queue.shift body.each do |chunk| @body_callback.call(chunk) end schedule_dequeue unless @queue.empty? end end def call(body) @queue << body schedule_dequeue end def each &blk @body_callback = blk schedule_dequeue end end module TailRenderer attr_accessor :callback def receive_data(data) @callback.call([data]) end def unbind @callback.succeed end end class AsyncTailer AsyncResponse = [-1, {}, []].freeze def call(env) body = DeferrableBody.new EventMachine::next_tick do env['async.callback'].call [200, {'Content-Type' => 'text/html'}, body] body.call ["

<html><head><title>Async Tailer</title></head><body><pre>"]

    end

    EventMachine::popen("tail -f #{ENV["TAIL_LOG_FILE"]}", TailRenderer) do |t|

      t.callback = body

      # If for some reason we 'complete' body, close the tail.
      body.callback do
        t.close_connection
      end

      # If for some reason the client disconnects, close the tail.
      body.errback do
        t.close_connection
      end

    end

    AsyncResponse
  end

end

run AsyncTailer.new
rainbows-5.0.0/t/simple-http_CoolioThreadPool.ru
use Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] && env['rainbows.model'] == :CoolioThreadPool
    [ 200, {}, [ env.inspect << "\n" ] ]
  else
    raise "rack.multithread is false"
  end
}
rainbows-5.0.0/t/t0008-ensure-usable-after-limit.sh
#!/bin/sh
. ./test-lib.sh
test -r random_blob || die "random_blob required, run with 'make $0'"

t_plan 14 "ensure we're accounting worker_connections properly"
nr=2

t_begin "setup" && {
	rtmpfiles a b c d
	rainbows_setup $model $nr
	rainbows -D sha1.ru -c $unicorn_config
	rainbows_wait_start
}

null_sha1=da39a3ee5e6b4b0d3255bfef95601890afd80709

t_begin "fire off concurrent processes" && {

	req='POST / HTTP/1.1\r\n'
	req="$req"'Host: example.com\r\n'
	req="$req"'Transfer-Encoding: chunked\r\n\r\n'

	for i in a b c d
	do
		rtmpfiles ${i}_fifo ${i}_tmp
		eval 'i_fifo=$'${i}_fifo
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		(
			(
				cat $i_fifo > $i_tmp &
				# need a full HTTP request to get around
				# httpready
				printf "$req"
				sleep 5
				printf '0\r\n\r\n'
				wait
				echo ok > $i
			) | socat - TCP:$listen > $i_fifo
		) &
	done
	wait
}

t_begin "check results" && {
	for i in a b c d
	do
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		test xok = x$(cat $i)
		test x$null_sha1 = x$(tail -1 $i_tmp)
	done
}

t_begin "repeat concurrent tests with faster clients" && {
	for i in a b c d
	do
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		curl -sSf -T- </dev/null http://$listen/ > $i 2> $i_tmp &
	done
	wait
}

t_begin "check results" && {
	for i in a b c d
	do
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		test ! -s $i_tmp
		test x$null_sha1 = x$(cat $i)
	done
}

t_begin "fire off truncated concurrent requests" && {

	req='POST / HTTP/1.1\r\n'
	req="$req"'Host: example.com\r\n'
	req="$req"'Transfer-Encoding: chunked\r\n'

	for i in a b c d
	do
		rtmpfiles ${i}_tmp
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		(
			(
				# need a full HTTP request to get around
				# httpready
				printf "$req"
				echo ok > $i
			) | socat - TCP:$listen > $i_tmp
		) &
	done
	wait
}

t_begin "check broken results" && {
	for i in a b c d
	do
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		test xok = x$(cat $i)
		dbgcat i_tmp
	done
}

t_begin "repeat concurrent tests with faster clients" && {
	for i in a b c d
	do
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		curl -sSf -T- </dev/null http://$listen/ > $i 2> $i_tmp &
	done
	wait
}

t_begin "check results" && {
	for i in a b c d
	do
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		test ! -s $i_tmp
		test x$null_sha1 = x$(cat $i)
	done
}

t_begin "fire off garbage" && {
	for i in a b c d
	do
		rtmpfiles ${i}_fifo ${i}_tmp
		eval 'i_fifo=$'${i}_fifo
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		(
			(
				cat $i_fifo > $i_tmp &
				dd if=random_blob bs=4096 count=1
				wait
				echo ok > $i
			) | socat - TCP:$listen > $i_fifo
		) &
	done
	wait
}

t_begin "check broken results" && {
	for i in a b c d
	do
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		test xok = x$(cat $i)
		grep -F 'HTTP/1.1 400 Bad Request' $i_tmp
	done
}

t_begin "repeat concurrent tests with faster clients" && {
	for i in a b c d
	do
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		curl -sSf -T- </dev/null http://$listen/ > $i 2> $i_tmp &
	done
	wait
}

t_begin "check results" && {
	for i in a b c d
	do
		eval 'i_tmp=$'${i}_tmp
		eval "i=$"$i
		test ! -s $i_tmp
		test x$null_sha1 = x$(cat $i)
	done
}

t_begin "teardown" && {
	kill $rainbows_pid
}

t_done
rainbows-5.0.0/t/t0009.ru
# we do not want Rack::Lint or anything to protect us
use Rack::ContentLength
use Rack::ContentType, "text/plain"
map "/" do
  run lambda { |env| [ 200, {}, [ "OK\n" ] ] }
end
map "/raise" do
  run lambda { |env| raise "BAD" }
end
map "/nil" do
  run lambda { |env| nil }
end
rainbows-5.0.0/t/t9101.ru
use Rack::ContentLength
use Rack::ContentType, 'text/plain'
use Rainbows::ThreadTimeout, :timeout => 1, :threshold => -1
run lambda { |env|
  if env["PATH_INFO"] =~ %r{/([\d\.]+)\z}
    Rainbows.sleep($1.to_f)
  end
  [ 200, [], [ "HI\n" ] ]
}
rainbows-5.0.0/t/t9000-rack-app-pool.sh
#!/bin/sh
. ./test-lib.sh
case $model in
*CoolioThread*|*RevThread*|Thread*|*Fiber*|Revactor|NeverBlock) ;;
*)
	t_info "skipping $T since it's not compatible with $model"
	exit 0
	;;
esac
nr_client=30 APP_POOL_SIZE=4

t_plan 6 "AppPool Rack middleware test for $model"

t_begin "configure and start" && {
	rtmpfiles curl_out curl_err
	rainbows_setup $model 50
	APP_POOL_SIZE=$APP_POOL_SIZE rainbows -D t9000.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "launch $nr_client requests" && {
	start=$(unix_time)
	seq="$(awk "BEGIN{for(i=0;i<$nr_client;++i) print i}" > $curl_out 2>> $curl_err &
	done
	wait
	t_info elapsed=$(( $(unix_time) - $start ))
}

t_begin "kill server" && {
	kill $rainbows_pid
}

t_begin "$APP_POOL_SIZE instances of app were used" && {
	test $APP_POOL_SIZE -eq $(sort < $curl_out | uniq | count_lines)
}

t_begin "no errors in curl stderr" && {
	test ! -s $curl_err
}

t_begin "no errors in Rainbows! stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/large-file-response.ru
# lib-large-file-response will stop running if we're not on Linux here
use Rack::ContentLength
use Rack::ContentType
map "/rss" do
  run lambda { |env|
    # on Linux, this is in kilobytes
    GC.start if GC.respond_to?(:start)
    ::File.read("/proc/self/status") =~ /^VmRSS:\s+(\d+)/
    [ 200, {}, [ ($1.to_i * 1024).to_s ] ]
  }
end
map "/pid" do
  run lambda { |env| [ 200, {}, [ "#{Process.pid}\n" ] ] }
end
map "/" do
  run Rack::File.new(Dir.pwd)
end
rainbows-5.0.0/t/close-pipe-to_path-response.ru
# must be run without Rack::Lint since that clobbers to_path
class MyMiddleware < Struct.new(:app)
  class Body < Struct.new(:body, :to_path)
    def each(&block); body.each(&block); end
    def close
      c = body.respond_to?(:close)
      ::File.open(ENV['fifo'], 'wb') do |fp|
        fp.syswrite("CLOSING #{body.inspect} #{to_path} (#{c})\n")
      end
      body.close if c
    end
  end

  def call(env)
    status, headers, body = app.call(env)
    body.respond_to?(:to_path) and body = Body.new(body, body.to_path)
    [ status, headers, body ]
  end
end
use MyMiddleware
use Rainbows::DevFdResponse
run(lambda { |env|
  io = IO.popen('cat random_blob', 'rb')
  [ 200,
    {
      'Content-Length' => ::File.stat('random_blob').size.to_s,
      'Content-Type' => 'application/octet-stream',
    },
    io ]
})
rainbows-5.0.0/t/GNUmakefile
# we can run tests in parallel with GNU make

all::

pid := $(shell echo $$PPID)

MRI = ruby
RUBY = ruby
rainbows_lib := $(shell cd ../lib && pwd)
-include ../local.mk
ifeq ($(RUBY_VERSION),)
  RUBY_VERSION := $(shell $(RUBY) -e 'puts RUBY_VERSION')
endif

ifeq ($(RUBY_VERSION),)
  $(error unable to detect RUBY_VERSION)
endif

RUBY_ENGINE := $(shell $(RUBY) -e 'puts((RUBY_ENGINE rescue "ruby"))')
export RUBY_VERSION RUBY_ENGINE

ifeq (Linux,$(shell uname -s))
  models += XEpoll
  models += XEpollThreadSpawn
  models += XEpollThreadPool
  models += Epoll
endif
models += WriterThreadPool
models += WriterThreadSpawn
models += ThreadPool
models += ThreadSpawn

models += StreamResponseEpoll

ifeq ($(RUBY_ENGINE),ruby)
  rp := )
  ONENINE := $(shell case $(RUBY_VERSION) in 1.9.*|2.*$(rp) echo true;;esac)
  ifeq ($(ONENINE),true)
    ifeq ($(RUBY_VERSION),1.9.2)
      models += Revactor
    endif
    models += FiberSpawn
    models += FiberPool

    RUBY_LE_2_1 := $(shell $(RUBY) -e 'puts(RUBY_VERSION.to_f <= 2.1)')
    ifeq ($(RUBY_LE_2_1), true)
      models += Coolio
      models += CoolioThreadPool
      models += CoolioThreadSpawn
      models += CoolioFiberSpawn
      models += EventMachine
      models += NeverBlock
    endif
  endif
endif

ifeq ($(RUBY_ENGINE),rbx)
  models += ActorSpawn
endif
all_models := $(models) Base

T = $(wildcard t[0-9][0-9][0-9][0-9]-*.sh)

MODEL_T := $(foreach m,$(all_models),$(addprefix $(m).,$(T)))
$(T): MODELS = $(models)

# some tests can be run with all models
t0000-simple-http.sh: MODELS = $(all_models)
t0001-unix-http.sh: MODELS = $(all_models)
t0002-graceful.sh: MODELS = $(all_models)
t0002-parser-error.sh: MODELS = $(all_models)
t0003-reopen-logs.sh: MODELS = $(all_models)

# recursively run per-model tests
# haven't figured out a good way to make make non-recursive here, yet...
$(T):
	$(MAKE) $(foreach m,$(MODELS),$(addprefix $(m).,$@))

$(all_models):
	$(MAKE) $(filter $@.%,$(MODEL_T))

all:: $(T)

# can't rely on "set -o pipefail" since we don't require bash or ksh93 :<
t_pfx = trash/$@-$(RUBY_ENGINE)-$(RUBY_VERSION)
TEST_OPTS =
# TRACER = strace -f -o $(t_pfx).strace -s 100000
# TRACER = /usr/bin/time -o $(t_pfx).time

ifdef V
  ifeq ($(V),2)
    TEST_OPTS += --trace
  else
    TEST_OPTS += --verbose
  endif
endif

bindir := $(CURDIR)/bin-$(RUBY_ENGINE)-$(RUBY_VERSION)
bin_rainbows := $(bindir)/rainbows
$(bin_rainbows): ruby_bin = $(shell which $(RUBY))
$(bin_rainbows): ../bin/rainbows
	mkdir -p $(@D)
	install -m 755 $^ $@.$(pid)
	$(MRI) -i -p -e '$$_.gsub!(%r{^#!.*$$},"#!$(ruby_bin)")' $@.$(pid)
	mv $@.$(pid) $@

random_blob:
	dd if=/dev/urandom bs=1M count=30 of=$@.$(pid)
	mv $@.$(pid) $@

dependencies := socat curl
deps := $(addprefix .dep+,$(dependencies))
$(deps): dep_bin = $(lastword $(subst +, ,$@))
$(deps):
	@which $(dep_bin) > $@.$(pid) 2>/dev/null || :
	@test -s $@.$(pid) || \
	  { echo >&2 "E '$(dep_bin)' not found in PATH=$(PATH)"; exit 1; }
	@mv $@.$(pid) $@

libs := tmp/isolate/$(RUBY_ENGINE)-$(RUBY_VERSION)/.libs
$(libs): test_isolate.rb
	mkdir -p $(@D)
	$(RUBY) $< > $@+
	mv $@+ $@
t_deps := random_blob $(libs) $(deps) $(bin_rainbows) trash/.gitignore
$(T): $(t_deps)

$(MODEL_T): export model = $(firstword $(subst ., ,$@))
$(MODEL_T): script = $(subst $(model).,,$@)
$(MODEL_T): export RUBY := $(RUBY)
$(MODEL_T): export PATH := $(bindir):$(PATH)
$(MODEL_T): $(t_deps)
	RUBYLIB=$(rainbows_lib):$$(cat $(libs)):$(RUBYLIB) \
	   $(TRACER) $(SHELL) $(SH_TEST_OPTS) $(script) $(TEST_OPTS)

trash/.gitignore:
	mkdir -p $(@D)
	echo '*' > $@

clean:
	$(RM) -r trash/*.log trash/*.code $(bindir)

.PHONY: $(T) clean
rainbows-5.0.0/t/times.ru
use Rack::ContentLength
use Rack::ContentType, "text/plain"
run lambda { |env|
  t = Process.times
  [ 200, {}, [ "utime=#{t.utime} stime=#{t.stime}" ] ]
}
rainbows-5.0.0/t/t0400-em-async-app.sh
#!/bin/sh
nr=${nr-5}
. ./test-lib.sh
case $model in
NeverBlock|EventMachine) ;;
*)
	t_info "skipping $T since it's not compatible with $model"
	exit 0
	;;
esac

t_plan 7 "async_app test for test for EM"

CONFIG_RU=async_examples/async_app.ru

t_begin "setup and start" && {
	rainbows_setup
	rtmpfiles a b c curl_err expect

	# this does not support Rack::Lint
	rainbows -E deployment -D $CONFIG_RU -c $unicorn_config
	rainbows_wait_start
}

t_begin "send async requests off in parallel" && {
	t0=$(unix_time)
	curl --no-buffer -sSf http://$listen/ > $a 2>> $curl_err &
	curl --no-buffer -sSf http://$listen/ > $b 2>> $curl_err &
	curl --no-buffer -sSf http://$listen/ > $c 2>> $curl_err &
}

t_begin "wait for curl terminations" && {
	wait
	t1=$(unix_time)
	elapsed=$(( $t1 - $t0 ))
	t_info "elapsed=$elapsed"
}

t_begin "termination signal sent" && {
	kill $rainbows_pid
}

t_begin "no errors from curl" && {
	test ! -s $curl_err
}

t_begin "no errors in stderr" && check_stderr

t_begin "responses match expected" && {
	echo 'Woah, async!' > $expect
	printf 'Cheers then!' >> $expect
	cmp $expect $a
	cmp $expect $b
	cmp $expect $c
}

t_done
rainbows-5.0.0/t/simple-http_Coolio.ru
use Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] == false && env['rainbows.model'] == :Coolio
    [ 200, {}, [ env.inspect << "\n" ] ]
  else
    raise "rack.multithread is true"
  end
}
rainbows-5.0.0/t/README
= \Rainbows! test suite - YES OUR TEST SUITE IS CONCURRENT!

These are all integration tests that start the server on random, unused
TCP ports or Unix domain sockets.  They're all designed to run
concurrently with other tests to minimize test time, but tests may be
run independently as well.

We write our tests primarily in Bourne shell because that's what we're
comfortable writing integration tests with.  This test suite is also
easily portable to non-Ruby web servers.

== Requirements

* {Ruby 1.8 or 1.9}[http://www.ruby-lang.org/] (duh!)
* {isolate ~> 2.1.0}[http://github.com/jbarnette/isolate] - for dependencies
* {GNU make}[http://www.gnu.org/software/make/]
* {socat}[http://www.dest-unreach.org/socat/]
* {curl >= 7.18.0}[http://curl.haxx.se/]
* standard UNIX shell utilities (Bourne sh, awk, sed, grep, ...)

We do not use bashisms or any non-portable, non-POSIX constructs
in our shell code.  We use the "pipefail" option if available and
mainly test with {ksh}[http://kornshell.com/], but occasionally
with {dash}[http://gondor.apana.org.au/~herbert/dash/] and
{bash}[http://www.gnu.org/software/bash/], too.

== Running Tests

*BSD users: use "gmake" instead of "make"

To run the entire test suite with 8 tests running at once:

  make -j8

To run one individual test for all concurrency models:

  make t0000-simple-http.sh

To run one individual test for one concurrency model:

  make Revactor.t0000-simple-http.sh

To run all tests for one concurrency model:

  make EventMachine

You may also increase verbosity by setting the "V" variable for
GNU make.  To disable trapping of stdout/stderr:

  make V=1

To enable the "set -x" option in shell scripts to trace execution

  make V=2

== Performance

Some of the tests are rather I/O intensive due to the rewindability
requirement of "rack.input" in the Rack specification and the somewhat
complicated (but awesome!) nature of the TeeInput class leading us to
test it very heavily.  If you have lots of RAM and a large tmpfs
partition, it is advisable to set your TMPDIR and also make the t/trash/
directory a symlink to a directory inside your TMPDIR.
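
A rough sketch of that setup (the tmpfs path below is only an example;
pick whatever tmpfs mount your system provides):

  export TMPDIR=/dev/shm/rainbows-test
  mkdir -p "$TMPDIR"
  rm -rf t/trash && ln -s "$TMPDIR" t/trash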
rainbows-5.0.0/t/t0107-rack-input-limit-zero.sh
#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
req_curl_chunked_upload_err_check

t_plan 6 "rack.input client_max_body_size zero"

t_begin "setup and startup" && {
	rtmpfiles curl_out curl_err
	rainbows_setup $model
	ed -s $unicorn_config < $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test x"$(cat $curl_out)" = x$empty_sha1
}

t_begin "chunked request" && {
	curl -vsSf -T- < /dev/null -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test x"$(cat $curl_out)" = x$empty_sha1
}

t_begin "small input chunked" && {
	rm -f $ok
	echo | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	fgrep 413 $curl_err
	test -e $ok
}

t_begin "small input content-length" && {
	rm -f $ok
	echo > $tmp
	curl -vsSf -T $tmp -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err || > $ok
	fgrep 413 $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test -e $ok
}

t_begin "shutdown" && {
	kill $rainbows_pid
}

t_done
rainbows-5.0.0/t/t0016.rb
# -*- encoding: utf-8 -*-
module T0016
  CHUNK = '©' * 1024 * 1024
  BODY = (1..50).map { CHUNK }
  HEADER = {
    # BODY.inject(0) { |m,c| m += c.bytesize }.to_s,
    'Content-Length' => '104857600',
    'Content-Type' => 'text/plain',
  }

  def self.call(env)
    [ 200, HEADER, BODY ]
  end
end
$0 == __FILE__ and T0016::BODY.each { |x| $stdout.syswrite(x) }
rainbows-5.0.0/t/sha1.ru
# SHA1 checksum generator
bs = ENV['bs'] ? ENV['bs'].to_i : 16384
require 'digest/sha1'
use Rack::ContentLength
app = lambda do |env|
  /\A100-continue\z/i =~ env['HTTP_EXPECT'] and
    return [ 100, {}, [] ]
  digest = Digest::SHA1.new
  input = env['rack.input']
  if buf = input.read(bs)
    begin
      digest.update(buf)
    end while input.read(bs, buf)
  end

  [ 200, {'Content-Type' => 'text/plain'}, [ digest.hexdigest << "\n" ] ]
end
run app
rainbows-5.0.0/t/t0043-quit-keepalive-disconnect.sh
#!/bin/sh
. ./test-lib.sh
case $model in
Coolio|CoolioThreadSpawn|CoolioThreadPool|EventMachine) ;;
Epoll|XEpoll|XEpollThreadPool|XEpollThreadSpawn) ;;
*)
	t_info "$0 not supported for $model"
	exit 0
	;;
esac

t_plan 7 "keepalive clients disconnected on SIGQUIT for $model"

t_begin "setup and start" && {
	rainbows_setup $model 50 30
	rainbows -E none -D env.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "start a keepalive request" && {
	(
		cat < $fifo > $tmp &
		printf 'GET / HTTP/1.1\r\nHost: example.com\r\n\r\n'
		wait
	) | socat - TCP4:$listen > $fifo &
}

t_begin "wait for response" && {
	while ! tail -1 < $tmp | grep '}$' >/dev/null
	do
		sleep 1
	done
}

t_begin "stop Rainbows! gracefully" && {
	t0=$(unix_time)
	kill -QUIT $rainbows_pid
}

t_begin "keepalive client disconnected quickly" && {
	wait
	diff=$(( $(unix_time) - $t0 ))
	test $diff -le 2 || die "client diff=$diff > 2"
}

t_begin "wait for termination" && {
	while kill -0 $rainbows_pid
	do
		sleep 1
	done
	diff=$(( $(unix_time) - $t0 ))
	test $diff -le 4 || die "server diff=$diff > 4"
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/t0100-rack-input-hammer-content-length.sh
nr_client=${nr_client-4}
. ./test-lib.sh
test -r random_blob || die "random_blob required, run with 'make $0'"

# basically we don't trust our own implementation of content-md5-put
# nor our Ruby 1.9 knowledge nor proper use of encodings in Ruby.
# So we try to use things like curl and sha1sum that are implemented
# without the Ruby interpreter to validate our own Ruby internals.

t_plan 7 "concurrent rack.input hammer stress test (content-length)"

t_begin "setup and startup" && {
	rtmpfiles curl_out curl_err
	rainbows_setup $model
	rainbows -D sha1.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "send $nr_client concurrent requests" && {
	start=$(unix_time)
	for i in $(awk "BEGIN{for(i=0;i<$nr_client;++i) print i}" </dev/null)
	do
		(
			curl -sSf -T random_blob http://$listen/ \
			  >> $curl_out 2>> $curl_err
		) &
	done
	wait
	t_info elapsed=$(( $(unix_time) - $start ))
}

t_begin "kill server" && kill $rainbows_pid

t_begin "got $nr_client responses" && {
	test $nr_client -eq $(count_lines < $curl_out)
}

t_begin "all responses identical" && {
	test 1 -eq $(sort < $curl_out | uniq | count_lines)
}

t_begin "sha1 matches on-disk sha1" && {
	blob_sha1=$(rsha1 < random_blob)
	t_info blob_sha1=$blob_sha1
	test x"$blob_sha1" = x"$(sort < $curl_out | uniq)"
}

t_begin "no errors in stderr log" && check_stderr

t_done
rainbows-5.0.0/t/t0201-async-response-no-autochunk.sh
#!/bin/sh
CONFIG_RU=async-response-no-autochunk.ru
. ./t0200-async-response.sh
rainbows-5.0.0/t/t0013-reload-bad-config.sh
#!/bin/sh
. ./test-lib.sh
t_plan 7 "reload config.ru error with preload_app true"

t_begin "setup and start" && {
	rainbows_setup
	rtmpfiles ru

	cat > $ru <<\EOF
use Rack::ContentLength
use Rack::ContentType, "text/plain"
x = { "hello" => "world" }
run lambda { |env| [ 200, {}, [ x.inspect << "\n" ] ] }
EOF
	echo 'preload_app true' >> $unicorn_config
	rainbows -D -c $unicorn_config $ru
	rainbows_wait_start
}

t_begin "hit with curl" && {
	out=$(curl -sSf http://$listen/)
	test x"$out" = x'{"hello"=>"world"}'
}

t_begin "introduce syntax error in rackup file" && {
	echo '...' >> $ru
}

t_begin "reload signal succeeds" && {
	kill -HUP $rainbows_pid
	rainbows_wait_start
	wait_for_reload $r_err error
	wait_for_reap
	> $r_err
}

t_begin "hit with curl" && {
	out=$(curl -sSf http://$listen/)
	test x"$out" = x'{"hello"=>"world"}'
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/t0105-rack-input-limit-bigger.sh
#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
test -r random_blob || die "random_blob required, run with 'make $0'"
req_curl_chunked_upload_err_check

t_plan 10 "rack.input client_max_body_size bigger"

t_begin "setup and startup" && {
	rtmpfiles curl_out curl_err
	rainbows_setup $model
	ed -s $unicorn_config < $curl_out 2> $curl_err || > $ok
	rm -f $tmp
	dbgcat curl_err
	dbgcat curl_out
	if ! grep 413 $curl_err
	then
		# send error as documented in curl(1) manpage
		grep -F '(55)' $curl_err
	fi
	test -e $ok
}

t_begin "stops a large chunked request" && {
	rm -f $ok
	dd if=/dev/zero bs=10485761 count=1 | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	if ! grep 413 $curl_err
	then
		# send error as documented in curl(1) manpage
		grep -F '(55)' $curl_err
	fi
	test -e $ok
}

t_begin "small size sha1 chunked ok" && {
	blob_sha1=b376885ac8452b6cbf9ced81b1080bfd570d9b91
	rm -f $ok
	dd if=/dev/zero bs=256 count=1 | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "small size sha1 content-length ok" && {
	blob_sha1=b376885ac8452b6cbf9ced81b1080bfd570d9b91
	rm -f $ok
	dd if=/dev/zero bs=256 count=1 of=$tmp
	curl -vsSf -T $tmp -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "right size sha1 chunked ok" && {
	blob_sha1=8c206a1a87599f532ce68675536f0b1546900d7a
	rm -f $ok
	dd if=/dev/zero bs=10485760 count=1 | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "right size sha1 content-length ok" && {
	blob_sha1=8c206a1a87599f532ce68675536f0b1546900d7a
	rm -f $ok
	dd if=/dev/zero bs=10485760 count=1 of=$tmp
	curl -vsSf -T $tmp -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err
	rm -f $tmp
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "default size sha1 chunked ok" && {
	blob_sha1=3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3
	rm -f $ok
	dd if=/dev/zero bs=1048576 count=1 | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "default size sha1 content-length ok" && {
	blob_sha1=3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3
	rm -f $ok
	dd if=/dev/zero bs=1048576 count=1 of=$tmp
	curl -vsSf -T $tmp -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err
	rm -f $tmp
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "shutdown" && {
	kill $rainbows_pid
}

t_done
rainbows-5.0.0/t/my-tap-lib.sh
#!/bin/sh
# Copyright (c) 2009 Eric Wong 
#
# TAP-producing shell library for POSIX-compliant Bourne shells.  We do
# not _rely_ on Bourne Again features, though we will use "set -o
# pipefail" from ksh93 or bash 3 if available
#
# Only generic, non-project/non-language-specific stuff goes here.  We
# only have POSIX dependencies for the core tests (without --verbose),
# though we'll enable useful non-POSIX things if they're available.
#
# This test library is intentionally unforgiving: it supports neither
# skipping tests nor continuing after a failure.  Any failure
# immediately halts execution, as does any reference to an undefined
# variable.
#
# When --verbose is specified, we always prefix stdout/stderr
# output with "#" to avoid confusing TAP consumers.  Otherwise
# the normal stdout/stderr streams are redirected to /dev/null
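#
# A minimal usage sketch (illustrative only; the plan, test names and
# test bodies below are hypothetical, not part of this library):
#
#   . ./my-tap-lib.sh
#   t_plan 2 "example plan"
#   t_begin "first check" && {
#     test 1 -eq 1
#   }
#   t_begin "second check" && {
#     test x"$(echo hi)" = xhi
#   }
#   t_done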

# dup normal stdout(fd=1) and stderr (fd=2) to fd=3 and fd=4 respectively
# normal TAP output goes to fd=3, nothing should go to fd=4
exec 3>&1 4>&2

# ensure a sane environment
TZ=UTC LC_ALL=C LANG=C
export LANG LC_ALL TZ
unset CDPATH

# pipefail is non-POSIX, but very useful in ksh93/bash
( set -o pipefail 2>/dev/null ) && set -o pipefail

SED=${SED-sed}

# Unlike other test frameworks, we are unforgiving and bail immediately
# on any failures.  We do this because we're lazy about error handling
# and also because we believe anything broken should not be allowed to
# propagate throughout the rest of the test
set -e
set -u

# name of our test
T=${0##*/}

t_expect_nr=-1
t_nr=0
t_current=
t_complete=false

# list of files to remove unconditionally on exit
T_RM_LIST=

# list of files to remove only on successful exit
T_OK_RM_LIST=

# emit output to stdout, it'll be parsed by the TAP consumer
# so it must be TAP-compliant output
t_echo () {
	echo >&3 "$@"
}

# emits non-parsed information to stdout; it will be prefixed with a '#'
# so as not to throw off TAP consumers
t_info () {
	t_echo '#' "$@"
}

# exit with an error and print a diagnostic
die () {
	echo >&2 "$@"
	exit 1
}

# our at_exit handler, it'll fire for all exits except SIGKILL (unavoidable)
t_at_exit () {
	code=$?
	set +e
	if test $code -eq 0
	then
		$t_complete || {
			t_info "t_done not called"
			code=1
		}
	elif test -n "$t_current"
	then
		t_echo "not ok $t_nr - $t_current"
	fi
	if test $t_expect_nr -ne -1
	then
		test $t_expect_nr -eq $t_nr || {
			t_info "planned $t_expect_nr tests but ran $t_nr"
			test $code -ne 0 || code=1
		}
	fi
	$t_complete || {
		t_info "unexpected test failure"
		test $code -ne 0 || code=1
	}
	rm -f $T_RM_LIST
	test $code -eq 0 && rm -f $T_OK_RM_LIST
	set +x
	exec >&3 2>&4
	t_close_fds
	exit $code
}

# close test-specific extra file descriptors
t_close_fds () {
	exec 3>&- 4>&-
}

# call this at the start of your test to specify the number of tests
# you plan to run
t_plan () {
	test "$1" -ge 1 || die "must plan at least one test"
	test $t_expect_nr -eq -1 || die "tried to plan twice in one test"
	t_expect_nr=$1
	shift
	t_echo 1..$t_expect_nr "#" "$@"
	trap t_at_exit EXIT
}

_t_checkup () {
	test $t_expect_nr -le 0 && die "no tests planned"
	test -n "$t_current" && t_echo "ok $t_nr - $t_current"
	true
}

# finalizes any previous test and starts a new one
t_begin () {
	_t_checkup
	t_nr=$(( $t_nr + 1 ))
	t_current="$1"

	# just in case somebody wanted to cheat us:
	set -e
}

# finalizes the current test without starting a new one
t_end () {
	_t_checkup
	t_current=
}

# run this to signify the end of your test
t_done () {
	_t_checkup
	t_current=
	t_complete=true
	test $t_expect_nr -eq $t_nr || exit 1
	exit 0
}

# create and assign named-pipes to variable _names_ passed to this function
t_fifos () {
	for _id in "$@"
	do
		_name=$_id
		_tmp=$(mktemp -t $T.$$.$_id.XXXXXXXX)
		eval "$_id=$_tmp"
		rm -f $_tmp
		mkfifo $_tmp
		T_RM_LIST="$T_RM_LIST $_tmp"
	done
}
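
# usage sketch (the variable names here are hypothetical):
#   t_fifos in_fifo out_fifo
# after the call, $in_fifo and $out_fifo point at freshly-created named
# pipes which are removed unconditionally at exit via T_RM_LIST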

t_verbose=false t_trace=false

while test "$#" -ne 0
do
	arg="$1"
	shift
	case $arg in
	-v|--verbose) t_verbose=true ;;
	--trace) t_trace=true t_verbose=true ;;
	*) die "Unknown option: $arg" ;;
	esac
done

# we only ever set up stdout; nothing should end up in the "real" stderr
if $t_verbose
then
	if test x"$(which mktemp 2>/dev/null)" = x
	then
		die "mktemp(1) not available for --verbose"
	fi
	t_fifos t_stdout t_stderr

	(
		# use a subshell so seds are not waitable
		$SED -e 's/^/#: /' < $t_stdout &
		$SED -e 's/^/#! /' < $t_stderr &
	) &
	wait
	exec > $t_stdout 2> $t_stderr
else
	exec > /dev/null 2> /dev/null
fi

$t_trace && set -x
true
rainbows-5.0.0/t/t9002-server-token.sh0000755000004100000410000000120212641135250017364 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
case $model in
Base) ;;
*) exit 0 ;; # don't waste cycles on trivial stuff :P
esac

t_plan 6 "ServerToken Rack middleware test for $model"

t_begin "configure and start" && {
	rtmpfiles curl_out curl_err
	rainbows_setup
	rainbows -D t9002.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "hit with curl" && {
	curl -sSfiI http://$listen/ > $curl_out 2> $curl_err
}

t_begin "kill server" && {
	kill $rainbows_pid
}

t_begin "no errors in curl stderr" && {
	test ! -s $curl_err
}

t_begin "no errors in Rainbows! stderr" && {
	check_stderr
}

t_begin "Server: token added" && {
	grep Server: $curl_out
}

t_done
rainbows-5.0.0/t/sha1-random-size.ru0000644000004100000410000000162212641135250017265 0ustar  www-datawww-data# SHA1 checksum generator
require 'digest/sha1'
use Rack::ContentLength
cap = 16384
app = lambda do |env|
  /\A100-continue\z/i =~ env['HTTP_EXPECT'] and
    return [ 100, {}, [] ]
  digest = Digest::SHA1.new
  input = env['rack.input']
  case env["PATH_INFO"]
  when "/gets_read_mix"
    warn "GETS_READ_MIX #{env['HTTP_TRANSFER_ENCODING'].inspect}"
    if buf = input.gets
      warn "input.rbuf: #{input.instance_variable_get(:@rbuf).inspect}"
      begin
        digest.update(buf)
        warn "buf.size : #{buf.size}"
      end while input.read(rand(cap), buf)
    end
  when "/each"
    input.each { |buf| digest.update(buf) }
  else
    if buf = input.read(rand(cap))
      begin
        raise "#{buf.size} > #{cap}" if buf.size > cap
        digest.update(buf)
      end while input.read(rand(cap), buf)
    end
  end

  [ 200, {'Content-Type' => 'text/plain'}, [ digest.hexdigest << "\n" ] ]
end
run app
rainbows-5.0.0/t/t0045-client_max_header_size.sh0000755000004100000410000000405512641135250021434 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll

t_plan 11 "client_max_header_size tests for $model"

t_begin "setup Rainbows!" && {
	rainbows_setup $model
}

t_begin "fails with zero size" && {
	ed -s $unicorn_config < $tmp &
		printf 'GET /\r\n'
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
	wait
	test xok = x"$(cat $ok)"
	test 1 -eq $(count_lines < $tmp)
	grep HTTP_VERSION $tmp && die "unexpected HTTP_VERSION in HTTP/0.9 request"
}

t_begin "HTTP/1.1 request fails" && {
	curl -vsSf http://$listen/ > $tmp 2>&1 && die "unexpected curl success"
	grep '400\( Bad Request\)\?$' $tmp
}

t_begin "increase client_max_header_size on reload" && {
	ed -s $unicorn_config < $tmp
	test 1 -eq $(count_lines < $tmp)
	dbgcat tmp
}

t_begin "no errors in stderr" && {
	check_stderr
}

t_begin "shutdown" && {
	kill $rainbows_pid
}

t_done
rainbows-5.0.0/t/t0025-write-on-close.sh0000755000004100000410000000065412641135250017615 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
t_plan 4 "write-on-close tests for funky response-bodies"

t_begin "setup and start" && {
	rainbows_setup
	rainbows -D -c $unicorn_config write-on-close.ru
	rainbows_wait_start
}

t_begin "write-on-close response body succeeds" && {
	test xGoodbye = x"$(curl -sSf --http1.0 http://$listen/)"
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/test_isolate.rb0000644000004100000410000000252012641135250016655 0ustar  www-datawww-datarequire 'rubygems'
require 'isolate'
engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'

path = "tmp/isolate/#{engine}-#{RUBY_VERSION}"
opts = {
  :system => false,
  # we want "ruby-1.8.7" and not "ruby-1.8", so disable multiruby
  :multiruby => false,
  :path => path,
}

old_out = $stdout.dup
$stdout.reopen($stderr)

lock = File.open(__FILE__, "rb")
lock.flock(File::LOCK_EX)
Isolate.now!(opts) do
  gem 'kgio', '2.10.0'
  gem 'rack', '1.6.4'
  gem 'kcar', '0.5.0'
  gem 'raindrops', '0.13.0'
  gem 'unicorn', '5.0.1'

  if engine == "ruby"
    gem 'sendfile', '1.2.2'
    gem 'eventmachine', '1.0.8'
    gem 'async_sinatra', '1.2.0'
    if RUBY_VERSION.to_f < 2.2
      gem 'cool.io', '1.1.0'
      gem 'neverblock', '0.1.6.2'
    end
  end

  if defined?(::Fiber) && engine == "ruby"
    if RUBY_VERSION.to_f < 2.2
      gem 'revactor', '0.1.5'
      gem 'rack-fiber_pool', '0.9.2' # depends on EM
    end
  end

  if RUBY_PLATFORM =~ /linux/
    gem 'sleepy_penguin', '3.4.1'

    # is 2.6.32 new enough?
    gem 'io_splice', '4.4.0' if `uname -r`.strip > '2.6.32'
  end
end

$stdout.reopen(old_out)

# don't load the old Rev if it exists, Cool.io 1.0.0 is compatible with it,
# even for everything Revactor uses.
dirs = Dir["#{path}/gems/*-*/lib"]
dirs.delete_if { |x| x =~ %r{/rev-[\d\.]+/lib} }
puts dirs.map { |x| File.expand_path(x) }.join(':')
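
# usage sketch (not part of the original file): the test suite presumably
# captures this output to build a load path, e.g.:
#   RUBYLIB=$(ruby test_isolate.rb):$RUBYLIB
# (compare the analogous test_isolate_cramp.rb usage in t0501)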
rainbows-5.0.0/t/client_header_buffer_size.ru0000644000004100000410000000022512641135250021352 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType, "text/plain"
run lambda { |env|
  [ 200, {}, [ "#{Rainbows.server.client_header_buffer_size}\n" ] ]
}
rainbows-5.0.0/t/fast-pipe-response.ru0000644000004100000410000000044112641135250017725 0ustar  www-datawww-data# must be run without Rack::Lint since that clobbers to_path
use Rainbows::DevFdResponse
run(lambda { |env|
  [ 200,
    {
      'Content-Length' => ::File.stat('random_blob').size.to_s,
      'Content-Type' => 'application/octet-stream',
    },
    IO.popen('cat random_blob', 'rb') ]
})
rainbows-5.0.0/t/env_rack_env.ru0000644000004100000410000000015612641135250016644 0ustar  www-datawww-datause Rack::ContentLength
run proc { |env|
  [ 200, { "Content-Type" => "text/plain" }, [ ENV["RACK_ENV"] ] ]
}
rainbows-5.0.0/t/t0040-keepalive_requests-setting.sh0000755000004100000410000000222212641135250022307 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
t_plan 6 "keepalive_requests limit tests for $model"

t_begin "setup and start" && {
	rainbows_setup $model 50 666
	rtmpfiles curl_out curl_err
	grep 'keepalive_timeout 666' $unicorn_config
	rainbows -E none -D env.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "curl requests hit default keepalive_requests limit" && {
	curl -sSfv http://$listen/[0-101] > $curl_out 2> $curl_err
	test 1 -eq $(grep 'Connection: close' $curl_err |count_lines)
	test 101 -eq $(grep 'Connection: keep-alive' $curl_err |count_lines)
}

t_begin "reload with smaller keepalive_requests limit" && {
	ed -s $unicorn_config < $curl_out 2> $curl_err
	test 2 -eq $(grep 'Connection: close' $curl_err |count_lines)
	test 11 -eq $(grep 'Connection: keep-alive' $curl_err |count_lines)
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/t0114-rewindable-input-true.sh0000755000004100000410000000112312641135250021163 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models EventMachine NeverBlock
skip_models Rev RevThreadSpawn RevThreadPool
skip_models Coolio CoolioThreadSpawn CoolioThreadPool
skip_models Epoll XEpoll

t_plan 4 "rewindable_input toggled to true"

t_begin "setup and start" && {
	rainbows_setup
	echo rewindable_input true >> $unicorn_config
	rainbows -D -c $unicorn_config t0114.ru
	rainbows_wait_start
}

t_begin "ensure worker is started" && {
	test xOK = x$(curl -T t0114.ru -sSf http://$listen/)
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/test-lib.sh0000644000004100000410000001273412641135250015720 0ustar  www-datawww-data#!/bin/sh
# Copyright (c) 2009 Rainbows! developers
. ./my-tap-lib.sh

set +u

# sometimes we rely on http_proxy to avoid wasting bandwidth with Isolate
# and multiple Ruby versions
NO_PROXY=${UNICORN_TEST_ADDR-127.0.0.1}
export NO_PROXY

if test -z "$model"
then
	# defaulting to Base would unfortunately fail some concurrency tests
	model=ThreadSpawn
	t_info "model undefined, defaulting to $model"
fi

set -e
RUBY="${RUBY-ruby}"
RUBY_VERSION=${RUBY_VERSION-$($RUBY -e 'puts RUBY_VERSION')}
t_pfx=$PWD/trash/$model.$T-$RUBY_ENGINE-$RUBY_VERSION
set -u

PATH=$PWD/bin:$PATH
export PATH

test -x $PWD/bin/unused_listen || die "must be run in 't' directory"

# skip the current test unless library $1 can be required and $2 is defined
require_check () {
	lib=$1
	const=$2
	if ! $RUBY -r$lib -e "puts $const" >/dev/null 2>&1
	then
		t_info "skipping $T since we don't have $lib"
		exit 0
	fi
}
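
# example (this is how the model checks at the bottom of this file use it):
#   require_check rev Rev::VERSION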

# "date +%s" is not in POSIX, but in GNU, and FreeBSD 9.0 (possibly earlier)
unix_time () {
	$RUBY -e 'puts Time.now.to_i'
}

# "wc -l" outputs leading whitespace on *BSDs, filter it out for portability
count_lines () {
	wc -l | tr -d '[:space:]'
}

# "wc -c" outputs leading whitespace on *BSDs, filter it out for portability
count_bytes () {
	wc -c | tr -d '[:space:]'
}

skip_models () {
	for i in "$@"
	do
		if test x"$model" != x"$i"
		then
			continue
		fi
		t_info "skipping $T since it is not compatible with $model"
		exit 0
	done
}


# given a list of variable names, create temporary files and assign
# the pathnames to those variables
rtmpfiles () {
	for id in "$@"
	do
		name=$id
		_tmp=$t_pfx.$id
		eval "$id=$_tmp"

		case $name in
		*fifo)
			rm -f $_tmp
			mkfifo $_tmp
			T_RM_LIST="$T_RM_LIST $_tmp"
			;;
		*socket)
			rm -f $_tmp
			T_RM_LIST="$T_RM_LIST $_tmp"
			;;
		*)
			> $_tmp
			T_OK_RM_LIST="$T_OK_RM_LIST $_tmp"
			;;
		esac
	done
}
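
# usage sketch (names are illustrative; see the tests for real uses):
#   rtmpfiles curl_out curl_err r_fifo
# plain names become empty temporary files removed on successful exit;
# names ending in "fifo" become named pipes and names ending in "socket"
# only reserve a pathname, both removed unconditionally at exit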

dbgcat () {
	id=$1
	eval '_file=$'$id
	echo "==> $id <=="
	sed -e "s/^/$id:/" < $_file
}

check_stderr () {
	set +u
	_r_err=${1-${r_err}}
	set -u
	if grep -i Error $_r_err
	then
		die "Errors found in $_r_err"
	elif grep SIGKILL $_r_err
	then
		die "SIGKILL found in $_r_err"
	fi
}

# rainbows_setup [ MODEL [ WORKER_CONNECTIONS ] ]
rainbows_setup () {
	eval $(unused_listen)
	rtmpfiles unicorn_config pid r_err r_out fifo tmp ok
	cat > $unicorn_config <> $unicorn_config
}

rainbows_wait_start () {
	# "cat $fifo" will block until the before_fork hook is called in
	# the Unicorn config file
	test xSTART = x"$(cat $fifo)"
	rainbows_pid=$(cat $pid)
}
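
# typical startup sequence in a test (sketch; "env.ru" is just one of the
# rackup files used by tests in this directory):
#   rainbows_setup $model
#   rainbows -D env.ru -c $unicorn_config
#   rainbows_wait_start
#   ... issue requests against http://$listen/ ...
#   kill $rainbows_pid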

wait_for_reload () {
	case $# in
	0) err_log=$r_err status=done ;;
	1) err_log=$1 status=done ;;
	2) err_log=$1 status=$2 ;;
	esac
	while ! egrep '(done|error) reloading' < $err_log >/dev/null
	do
		sleep 1
	done
	grep "$status reloading" $err_log >/dev/null
}

wait_for_reap () {
	case $# in
	0) err_log=$r_err ;;
	1) err_log=$1 ;;
	esac

	while ! grep reaped < $err_log >/dev/null
	do
		sleep 1
	done
}

rsha1 () {
	_cmd="$(which sha1sum 2>/dev/null || :)"
	test -n "$_cmd" || _cmd="$(which openssl 2>/dev/null || :) sha1"
	test "$_cmd" != " sha1" || _cmd="$(which gsha1sum 2>/dev/null || :)"

	# last resort, see comments in sha1sum.rb for reasoning
	test -n "$_cmd" || _cmd=sha1sum.rb
	expr "$($_cmd)" : '\([a-f0-9]\{40\}\)'
}

req_curl_chunked_upload_err_check () {
	set +e
	curl --version 2>/dev/null | awk '$1 == "curl" {
		split($2, v, /\./)
		if ((v[1] < 7) || (v[1] == 7 && v[2] < 18))
			code = 1
	}
	END { exit(code) }'
	if test $? -ne 0
	then
		t_info "curl >= 7.18.0 required for $T"
		exit 0
	fi
}

check_splice () {
	case $(uname -s) in
	Linux) ;;
	*)
		t_info "skipping $T since it's not Linux"
		exit 0
		;;
	esac

	# we only allow splice on 2.6.32+
	min=32 uname_r=$(uname -r)
	case $uname_r in
	2.6.*)
		sub=$(expr "$uname_r" : '2\.6\.\(.*\)$')
		if test $sub -lt $min
		then
			t_info "skipping $T (Linux $(uname_r < 2.6.$min)"
			exit 0
		fi
		;;
	[3-9].*)
		# OK
		;;
	*)
		t_info "skipping $T (Linux $uname_r < 2.6.$min)"
		exit 0
		;;
	esac
}

check_threaded_app_dispatch () {
	case $model in
	ThreadSpawn|ThreadPool) ;;
	RevThreadSpawn|RevThreadPool) ;;
	CoolioThreadSpawn|CoolioThreadPool) ;;
	XEpollThreadSpawn|XEpollThreadPool) ;;
	*)
		t_info "$0 is only compatible with threaded app dispatch"
		exit 0 ;;
	esac
}

check_copy_stream () {
	case $RUBY_VERSION in
	1.9.*) ;;
	*)
		t_info "skipping $T since it can't IO.copy_stream"
		exit 0
		;;
	esac

	case $model in
	ThreadSpawn|WriterThreadSpawn|ThreadPool|WriterThreadPool|Base) ;;
	XEpollThreadSpawn|XEpollThreadPool) ;;
	*)
		t_info "skipping $T since it doesn't use copy_stream"
		exit 0
		;;
	esac
}

case $model in
Rev) require_check rev Rev::VERSION ;;
Coolio) require_check coolio Coolio::VERSION ;;
Revactor) require_check revactor Revactor::VERSION ;;
EventMachine) require_check eventmachine EventMachine::VERSION ;;
esac
rainbows-5.0.0/t/t0003-reopen-logs.sh0000755000004100000410000000425512641135250017175 0ustar  www-datawww-data#!/bin/sh
# don't set nr_client for Rev, only _one_ app running at once :x
nr_client=${nr_client-2}
. ./test-lib.sh

t_plan 19 "reopen rotated logs"

t_begin "setup and startup" && {
	rtmpfiles curl_out curl_err r_rot
	rainbows_setup $model
	rainbows -D sleep.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "ensure server is responsive" && {
	curl -sSf http://$listen/ >/dev/null
}

t_begin "start $nr_client concurrent requests" && {
	start=$(unix_time)
	for i in $(awk "BEGIN{for(i=0;i<$nr_client;++i) print i}" > $curl_out 2>> $curl_err ) &
	done
}

t_begin "ensure stderr log is clean" && check_stderr

t_begin "external log rotation" && {
	rm -f $r_rot
	mv $r_err $r_rot
}

t_begin "send reopen log signal (USR1)" && {
	kill -USR1 $rainbows_pid
}

t_begin "wait for rotated log to reappear" && {
	nr=60
	while ! test -f $r_err && test $nr -ge 0
	do
		sleep 1
		nr=$(( $nr - 1 ))
	done
}

t_begin "wait for worker to reopen logs" && {
	nr=60
	re="worker=.* done reopening logs"
	while ! grep "$re" < $r_err >/dev/null && test $nr -ge 0
	do
		sleep 1
		nr=$(( $nr - 1 ))
	done
}

dbgcat r_rot
dbgcat r_err

t_begin "wait curl requests to finish" && {
	wait
	t_info elapsed=$(( $(unix_time) - $start ))
}

t_begin "ensure no errors from curl" && {
	test ! -s $curl_err
}

t_begin "curl got $nr_client responses" && {
	test "$(count_lines < $curl_out)" -eq $nr_client
}

t_begin "all responses were identical" && {
	nr=$(sort < $curl_out | uniq | count_lines)
	test "$nr" -eq 1
}

t_begin 'response was "Hello"' && {
	test x$(sort < $curl_out | uniq) = xHello
}

t_begin "current server stderr is clean" && check_stderr

t_begin "rotated stderr is clean" && {
	check_stderr $r_rot
}

t_begin "server is now writing logs to new stderr" && {
	before_rot=$(count_bytes < $r_rot)
	before_err=$(count_bytes < $r_err)
	curl -sSfv http://$listen/
	after_rot=$(count_bytes < $r_rot)
	after_err=$(count_bytes < $r_err)
	test $after_rot -eq $before_rot
	test $after_err -gt $before_err
}

t_begin "stop server" && {
	kill $rainbows_pid
}

dbgcat r_err

t_begin "current server stderr is clean" && check_stderr
t_begin "rotated stderr is clean" && check_stderr $r_rot

t_done
rainbows-5.0.0/t/t0101-rack-input-trailer.sh0000755000004100000410000000352712641135250020460 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
test -r random_blob || die "random_blob required, run with 'make $0'"

t_plan 13 "input trailer test $model"

t_begin "setup and startup" && {
	rtmpfiles curl_out
	rainbows_setup $model
	rainbows -D content-md5.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "staggered trailer upload" && {
	zero_md5="1B2M2Y8AsgTpgAmY7PhCfg=="
	(
		cat $fifo > $tmp &
		printf 'PUT /s HTTP/1.1\r\n'
		printf 'Host: example.com\r\n'
		printf 'Transfer-Encoding: chunked\r\n'
		printf 'Trailer: Content-MD5\r\n\r\n'
		printf '0\r\nContent-MD5: '
		sleep 5
		printf '%s\r\n\r\n' $zero_md5
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
	test xok = x"$(cat $ok)"
}

t_begin "HTTP response is OK" && {
	grep 'HTTP/1\.[01] 200 OK' $tmp
}

t_begin "upload small blob" && {
	(
		cat $fifo > $tmp &
		echo hello world | content-md5-put
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
	test xok = x"$(cat $ok)"
}

t_begin "HTTP response is OK" && grep 'HTTP/1\.[01] 200 OK' $tmp
t_begin "no errors in stderr log" && check_stderr

t_begin "big blob request" && {
	(
		cat $fifo > $tmp &
		content-md5-put < random_blob
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
	test xok = x"$(cat $ok)"
}

t_begin "HTTP response is OK" && grep 'HTTP/1\.[01] 200 OK' $tmp
t_begin "no errors in stderr log" && check_stderr

t_begin "staggered blob upload" && {
	(
		cat $fifo > $tmp &
		(
			dd bs=164 count=1 < random_blob
			sleep 2
			dd bs=4545 count=1 < random_blob
			sleep 2
			dd bs=1234 count=1 < random_blob
			echo subok > $ok
		) 2>/dev/null | content-md5-put
		test xsubok = x"$(cat $ok)"
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
	test xok = x"$(cat $ok)"
}

t_begin "HTTP response is OK" && grep 'HTTP/1\.[01] 200 OK' $tmp

t_begin "no errors in stderr log" && check_stderr

t_begin "kill server" && {
	kill $rainbows_pid
}

t_done
rainbows-5.0.0/t/t0021-sendfile-wrap-to_path.sh0000755000004100000410000000500012641135250021124 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
test -r random_blob || die "random_blob required, run with 'make $0'"
case $RUBY_ENGINE in
ruby) ;;
*)
	t_info "skipping $T since it can't load the sendfile gem, yet"
	exit 0
	;;
esac

t_plan 16 "sendfile wrap body response for $model"

t_begin "setup and startup" && {
	rtmpfiles out err http_fifo sub_ok
	rainbows_setup $model
	echo 'require "sendfile"' >> $unicorn_config
	echo 'def (::IO).copy_stream(*x); abort "NO"; end' >> $unicorn_config

	# can't load Rack::Lint here since it clobbers body#to_path
	export fifo
	rainbows -E none -D file-wrap-to_path.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "read random blob sha1" && {
	random_blob_sha1=$(rsha1 < random_blob)
}

t_begin "start FIFO reader" && {
	cat $fifo > $out &
}

t_begin "single request matches" && {
	sha1=$(curl -sSfv 2> $err http://$listen/random_blob | rsha1)
	test -n "$sha1"
	test x"$sha1" = x"$random_blob_sha1"
}

t_begin "body.close called" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "start FIFO reader for abortive HTTP/1.1 request" && {
	cat $fifo > $out &
}

t_begin "send abortive HTTP/1.1 request" && {
	rm -f $ok
	(
		printf 'GET /random_blob HTTP/1.1\r\nHost: example.com\r\n\r\n'
		dd bs=4096 count=1 < $http_fifo >/dev/null
		echo ok > $ok
	) | socat - TCP:$listen > $http_fifo || :
	test xok = x$(cat $ok)
}

t_begin "body.close called for aborted HTTP/1.1 request" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "start FIFO reader for abortive HTTP/1.0 request" && {
	cat $fifo > $out &
}

t_begin "send abortive HTTP/1.0 request" && {
	rm -f $ok
	(
		printf 'GET /random_blob HTTP/1.0\r\n\r\n'
		dd bs=4096 count=1 < $http_fifo >/dev/null
		echo ok > $ok
	) | socat - TCP:$listen > $http_fifo || :
	test xok = x$(cat $ok)
}

t_begin "body.close called for aborted HTTP/1.0 request" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "start FIFO reader for abortive HTTP/0.9 request" && {
	cat $fifo > $out &
}

t_begin "send abortive HTTP/0.9 request" && {
	rm -f $ok
	(
		printf 'GET /random_blob\r\n'
		dd bs=4096 count=1 < $http_fifo >/dev/null
		echo ok > $ok
	) | socat - TCP:$listen > $http_fifo || :
	test xok = x$(cat $ok)
}

t_begin "body.close called for aborted HTTP/0.9 request" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "shutdown server" && {
	kill -QUIT $rainbows_pid
}

t_begin "check stderr" && check_stderr

t_done
rainbows-5.0.0/t/simple-http_XEpoll.ru0000644000004100000410000000034412641135250017734 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] == false && env['rainbows.model'] == :XEpoll
    [ 200, {}, [ Thread.current.inspect << "\n" ] ]
  else
    raise env.inspect
  end
}
rainbows-5.0.0/t/content-md5.ru0000644000004100000410000000113212641135250016334 0ustar  www-datawww-data# SHA1 checksum generator
bs = ENV['bs'] ? ENV['bs'].to_i : 4096
require 'digest/md5'
use Rack::ContentLength
app = lambda do |env|
  /\A100-continue\z/i =~ env['HTTP_EXPECT'] and
    return [ 100, {}, [] ]
  digest = Digest::MD5.new
  input = env['rack.input']
  if buf = input.read(bs)
    begin
      digest.update(buf)
    end while input.read(bs, buf)
  end

  expect = env['HTTP_CONTENT_MD5']
  readed = [ digest.digest ].pack('m').strip
  body = "expect=#{expect}\nreaded=#{readed}\n"
  status = expect == readed ? 200 : 500

  [ status, {'Content-Type' => 'text/plain'}, [ body ] ]
end
run app
rainbows-5.0.0/t/t0004-heartbeat-timeout.sh0000755000004100000410000000311612641135250020362 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh

t_plan 12 "heartbeat/timeout test for $model"

t_begin "setup and startup" && {
	rainbows_setup $model
	echo timeout 3 >> $unicorn_config
	echo preload_app true >> $unicorn_config
	rainbows -D heartbeat-timeout.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "read worker PID" && {
	worker_pid=$(curl -sSf http://$listen/)
	t_info "worker_pid=$worker_pid"
}

t_begin "sleep for a bit, ensure worker PID does not change" && {
	sleep 4
	test $(curl -sSf http://$listen/) -eq $worker_pid
}

t_begin "block the worker process to force it to die" && {
	rm $ok
	t0=$(unix_time)
	err="$(curl -sSf http://$listen/block-forever 2>&1 || > $ok)"
	t1=$(unix_time)
	elapsed=$(($t1 - $t0))
	t_info "elapsed=$elapsed err=$err"
	test x"$err" != x"Should never get here"
	test x"$err" != x"$worker_pid"
}

t_begin "ensure worker was killed" && {
	test -e $ok
	test 1 -eq $(grep timeout $r_err | grep killing | count_lines)
}

t_begin "ensure timeout took at least 3 seconds" && {
	test $elapsed -ge 3
}

t_begin "wait for new worker to start up" && {
	test xSTART = x"$(cat $fifo)"
}

t_begin "we get a fresh new worker process" && {
	new_worker_pid=$(curl -sSf http://$listen/)
	test $new_worker_pid -ne $worker_pid
}

t_begin "truncate the server error log" && {
	> $r_err
}

t_begin "SIGSTOP and SIGCONT on rainbows master does not kill worker" && {
	kill -STOP $rainbows_pid
	sleep 4
	kill -CONT $rainbows_pid
	sleep 2
	test $new_worker_pid -eq $(curl -sSf http://$listen/)
}

t_begin "stop server" && {
	kill -QUIT $rainbows_pid
}

t_begin "check stderr" && check_stderr

dbgcat r_err

t_done
rainbows-5.0.0/t/sleep.ru0000644000004100000410000000044212641135250015312 0ustar  www-datawww-datause Rack::ContentLength

run lambda { |env|
  /\A100-continue\z/i =~ env['HTTP_EXPECT'] and return [ 100, {}, [] ]

  env['rack.input'].read
  nr = 1
  env["PATH_INFO"] =~ %r{/([\d\.]+)\z} and nr = $1.to_f

  Rainbows.sleep(nr)

  [ 200, {'Content-Type' => 'text/plain'}, [ "Hello\n" ] ]
}
rainbows-5.0.0/t/t0009-broken-app.sh0000755000004100000410000000217712641135250017010 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll

t_plan 9 "graceful handling of broken apps for $model"

t_begin "setup and start" && {
	rainbows_setup $model 1
	rainbows -E none -D t0009.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "normal response is alright" && {
	test xOK = x"$(curl -sSf http://$listen/)"
}

t_begin "app raised exception" && {
	curl -sSf http://$listen/raise 2> $tmp || :
	grep -F 500 $tmp
	> $tmp
}

t_begin "app exception logged and backtrace not swallowed" && {
	grep -F 'app error' $r_err
	grep -A1 -F 'app error' $r_err | tail -1 | grep t0009.ru:
	dbgcat r_err
	> $r_err
}

t_begin "trigger bad response" && {
	curl -sSf http://$listen/nil 2> $tmp || :
	grep -F 500 $tmp
	> $tmp
}

t_begin "app exception logged" && {
	grep -F 'app error' $r_err
	> $r_err
}

t_begin "normal responses alright afterwards" && {
	> $tmp
	curl -sSf http://$listen/ >> $tmp &
	curl -sSf http://$listen/ >> $tmp &
	curl -sSf http://$listen/ >> $tmp &
	curl -sSf http://$listen/ >> $tmp &
	wait
	test xOK = x$(sort < $tmp | uniq)
}

t_begin "teardown" && {
	kill $rainbows_pid
}

t_begin "check stderr" && check_stderr

t_done
rainbows-5.0.0/t/t0030-fast-pipe-response.sh0000755000004100000410000000273012641135250020463 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
test -r random_blob || die "random_blob required, run with 'make $0'"

t_plan 10 "fast pipe response for $model"

t_begin "setup and startup" && {
	rtmpfiles err out
	rainbows_setup $model
	rainbows -E none -D fast-pipe-response.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "read random blob sha1" && {
	random_blob_sha1=$(rsha1 < random_blob)
	three_sha1=$(cat random_blob random_blob random_blob | rsha1)
}

t_begin "single request matches" && {
	sha1=$(curl -sSfv 2> $err http://$listen/ | rsha1)
	test -n "$sha1"
	test x"$sha1" = x"$random_blob_sha1"
}

t_begin "Content-Length header preserved in response" && {
	grep "^< Content-Length:" $err
}

t_begin "send three keep-alive requests" && {
	sha1=$(curl -vsSf 2> $err \
	       http://$listen/ http://$listen/ http://$listen/ | rsha1)
	test -n "$sha1"
	test x"$sha1" = x"$three_sha1"
}

t_begin "ensure responses were all keep-alive" && {
	test 3 -eq $(grep '< Connection: keep-alive' < $err | count_lines)
}

t_begin "HTTP/1.0 test" && {
	sha1=$(curl -0 -v 2> $err -sSf http://$listen/ | rsha1)
	test $sha1 = $random_blob_sha1
	grep '< Connection: close' < $err
}

t_begin "HTTP/0.9 test" && {
	(
		printf 'GET /\r\n'
		rsha1 < $fifo > $tmp &
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
	test $(cat $tmp) = $random_blob_sha1
	test xok = x$(cat $ok)
}

t_begin "shutdown server" && {
	kill -QUIT $rainbows_pid
}

t_begin "check stderr" && check_stderr

t_done
rainbows-5.0.0/t/app_deferred.ru0000644000004100000410000000121312641135250016617 0ustar  www-datawww-data#\-E none
# can't use non-compatible middleware that doesn't pass "deferred?" calls
#
# used for testing deferred actions for Merb and possibly other frameworks
# ref: http://brainspl.at/articles/2008/04/18/deferred-requests-with-merb-ebb-and-thin

class DeferredApp < Struct.new(:app)
  def deferred?(env)
    env["PATH_INFO"] == "/deferred"
  end

  def call(env)
    env["rack.multithread"] or raise RuntimeError, "rack.multithread not true"
    body = "#{Thread.current.inspect}\n"
    headers = {
      "Content-Type" => "text/plain",
      "Content-Length" => body.size.to_s,
    }
    [ 200, headers, [ body ] ]
  end
end

run DeferredApp.new
rainbows-5.0.0/t/rack-fiber_pool/0000755000004100000410000000000012641135250016670 5ustar  www-datawww-datarainbows-5.0.0/t/rack-fiber_pool/app.ru0000644000004100000410000000033412641135250020020 0ustar  www-datawww-datarequire 'rack/fiber_pool'
use Rack::FiberPool
use Rack::ContentLength
use Rack::ContentType, 'text/plain'
run lambda { |env|
  f = Fiber.current
  EM.add_timer(3) { f.resume }
  Fiber.yield
  [ 200, {}, [ "#{f}\n" ] ]
}
rainbows-5.0.0/t/kgio-pipe-response.ru0000644000004100000410000000062412641135250017724 0ustar  www-datawww-data# must be run without Rack::Lint since that clobbers to_path
use Rainbows::DevFdResponse
run(lambda { |env|
  io = case env["rainbows.model"].to_s
  when /Fiber/
    Rainbows::Fiber::IO::Pipe
  else
    Kgio::Pipe
  end.popen('cat random_blob', 'rb')

  [ 200,
    {
      'Content-Length' => ::File.stat('random_blob').size.to_s,
      'Content-Type' => 'application/octet-stream',
    },
    io
  ]
})
rainbows-5.0.0/t/t0103-rack-input-limit.sh0000755000004100000410000000271612641135250020135 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
test -r random_blob || die "random_blob required, run with 'make $0'"
req_curl_chunked_upload_err_check

t_plan 6 "rack.input client_max_body_size default"

t_begin "setup and startup" && {
	rtmpfiles curl_out curl_err cmbs_config
	rainbows_setup $model
	grep -v client_max_body_size < $unicorn_config > $cmbs_config
	rainbows -D sha1-random-size.ru -c $cmbs_config
	rainbows_wait_start
}

t_begin "regular request" && {
	rm -f $ok
	curl -vsSf -T random_blob -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	test -e $ok
}

t_begin "chunked request" && {
	rm -f $ok
	curl -vsSf -T- < random_blob -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	test -e $ok
}

t_begin "default size sha1 chunked" && {
	blob_sha1=3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3
	rm -f $ok
	> $r_err
	dd if=/dev/zero bs=1048576 count=1 | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err
	test "$(cat $curl_out)" = $blob_sha1
	dbgcat curl_err
	dbgcat curl_out
}

t_begin "default size sha1 content-length" && {
	blob_sha1=3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3
	rm -f $ok
	dd if=/dev/zero bs=1048576 count=1 of=$tmp
	curl -vsSf -T $tmp -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err
	test "$(cat $curl_out)" = $blob_sha1
	dbgcat curl_err
	dbgcat curl_out
}

t_begin "shutdown" && {
	kill $rainbows_pid
}

t_done
rainbows-5.0.0/t/t0501-cramp-rainsocket.sh0000755000004100000410000000156012641135250020204 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
case $model in
disabled) ;;
*)
	t_info "skipping $T since it's not compatible with $model"
	exit 0
	;;
esac
RUBYLIB=$($RUBY test_isolate_cramp.rb):$RUBYLIB
export RUBYLIB
require_check cramp Cramp::VERSION

t_plan 4 "WebSocket monkey patch validity test for Cramp"

CONFIG_RU=cramp/rainsocket.ru

t_begin "setup and start" && {
	rainbows_setup
	rtmpfiles curl_err

	# Like the rest of the EM/async stuff, it's not Rack::Lint compatible
	rainbows -E deployment -D $CONFIG_RU -c $unicorn_config
	rainbows_wait_start
}

t_begin "wait for server to say hello to us" && {
	ok=$( (curl --no-buffer -sS http://$listen/ || :) | \
	     (tr -d '\0\0377' || :) | \
	     awk '/Hello from the Server/ { print "ok"; exit 0 }')

	test x"$ok" = xok
}

t_begin "termination signal sent" && {
	kill $rainbows_pid
}

t_begin "no errors in stderr" && check_stderr

t_done
rainbows-5.0.0/t/t0106-rack-input-keepalive.sh0000755000004100000410000000560512641135250020767 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
t_plan 11 "rack.input pipelining test"

t_begin "setup and startup" && {
	rainbows_setup $model
	rtmpfiles req
	rainbows -D sha1.ru -c $unicorn_config
	body=hello
	body_size=$(printf $body | count_bytes)
	body_sha1=$(printf $body | rsha1)
	random_blob_size=$(count_bytes < random_blob)
	random_blob_sha1=$(rsha1 < random_blob)
	rainbows_wait_start
}

t_begin "send big pipelined chunked requests" && {
	(
		cat $fifo > $tmp &
		Connection=keep-alive
		export Connection
		content-md5-put < random_blob
		content-md5-put < random_blob
		content-md5-put < random_blob
		printf 'PUT / HTTP/1.0\r\n'
		printf 'Content-Length: %d\r\n\r\n' $random_blob_size
		cat random_blob
		wait
		echo ok > $ok
	) | socat - TCP4:$listen > $fifo
	test x"$(cat $ok)" = xok
}

t_begin "check responses" && {
	dbgcat tmp
	test 4 -eq $(grep $random_blob_sha1 $tmp | count_lines)
}

t_begin "send big pipelined identity requests" && {
	(
		cat $fifo > $tmp &
		printf 'PUT / HTTP/1.0\r\n'
		printf 'Connection: keep-alive\r\n'
		printf 'Content-Length: %d\r\n\r\n' $random_blob_size
		cat random_blob
		printf 'PUT / HTTP/1.1\r\n'
		printf 'Content-Length: %d\r\n\r\n' $random_blob_size
		cat random_blob
		printf 'PUT / HTTP/1.0\r\n'
		printf 'Content-Length: %d\r\n\r\n' $random_blob_size
		cat random_blob
		wait
		echo ok > $ok
	) | socat - TCP4:$listen > $fifo
	test x"$(cat $ok)" = xok
}

t_begin "check responses" && {
	dbgcat tmp
	test 3 -eq $(grep $random_blob_sha1 $tmp | count_lines)
}

t_begin "send pipelined identity requests" && {

	{
		printf 'PUT / HTTP/1.0\r\n'
		printf 'Connection: keep-alive\r\n'
		printf 'Content-Length: %d\r\n\r\n%s' $body_size $body
		printf 'PUT / HTTP/1.1\r\nHost: example.com\r\n'
		printf 'Content-Length: %d\r\n\r\n%s' $body_size $body
		printf 'PUT / HTTP/1.0\r\n'
		printf 'Content-Length: %d\r\n\r\n%s' $body_size $body
	} > $req
	(
		cat $fifo > $tmp &
		cat $req
		wait
		echo ok > $ok
	) | socat - TCP4:$listen > $fifo
	test x"$(cat $ok)" = xok
}

t_begin "check responses" && {
	dbgcat tmp
	test 3 -eq $(grep $body_sha1 $tmp | count_lines)
}

t_begin "send pipelined chunked requests" && {

	{
		printf 'PUT / HTTP/1.0\r\n'
		printf 'Connection: keep-alive\r\n'
		printf 'Transfer-Encoding: chunked\r\n\r\n'
		printf '%x\r\n%s\r\n0\r\n\r\n' $body_size $body
		printf 'PUT / HTTP/1.1\r\nHost: example.com\r\n'
		printf 'Transfer-Encoding: chunked\r\n\r\n'
		printf '%x\r\n%s\r\n0\r\n\r\n' $body_size $body
		printf 'PUT / HTTP/1.0\r\n'
		printf 'Transfer-Encoding: chunked\r\n\r\n'
		printf '%x\r\n%s\r\n0\r\n\r\n' $body_size $body
	} > $req
	(
		cat $fifo > $tmp &
		cat $req
		wait
		echo ok > $ok
	) | socat - TCP4:$listen > $fifo
	test x"$(cat $ok)" = xok
}

t_begin "check responses" && {
	dbgcat tmp
	test 3 -eq $(grep $body_sha1 $tmp | count_lines)
}

t_begin "kill server" && kill $rainbows_pid

t_begin "no errors in stderr log" && check_stderr

t_done
rainbows-5.0.0/t/t9101-thread-timeout-threshold.sh0000755000004100000410000000172612641135250021700 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
check_threaded_app_dispatch

t_plan 6 "ThreadTimeout Rack middleware test for $model"

t_begin "configure and start" && {
	rtmpfiles curl_err curl_out
	rainbows_setup $model 10
	rainbows -D t9101.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "normal request should not timeout" && {
	test x"HI" = x"$(curl -sSf http://$listen/ 2>> $curl_err)"
}

t_begin "8 sleepy requests do not time out" && {
	> $curl_err
	for i in 1 2 3 4 5 6 7 8
	do
		curl --no-buffer -sSf http://$listen/3 \
		  2>> $curl_err >> $curl_out &
	done
	wait
	test 8 -eq "$(count_lines < $curl_out)"
	test xHI = x"$(sort < $curl_out | uniq)"
}

t_begin "9 sleepy requests, some time out" && {
	> $curl_err
	> $curl_out
	for i in 1 2 3 4 5 6 7 8 9
	do
		curl -sSf --no-buffer \
		  http://$listen/3 2>> $curl_err >> $curl_out &
	done
	wait
	grep 408 $curl_err
}

t_begin "kill server" && {
	kill $rainbows_pid
}

t_begin "no errors in Rainbows! stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/t0114.ru0000644000004100000410000000033012641135250014747 0ustar  www-datawww-data#\ -E none
use Rack::ContentLength
use Rack::ContentType, 'text/plain'
app = lambda do |env|
  case env['rack.input']
  when Unicorn::TeeInput
    [ 200, {}, %w(OK) ]
  else
    [ 500, {}, %w(NO) ]
  end
end
run app
rainbows-5.0.0/t/t0017-keepalive-timeout-zero.sh0000755000004100000410000000163112641135250021351 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
t_plan 6 "keepalive_timeout 0 tests for $model"

t_begin "setup and start" && {
	rainbows_setup $model 2 0
	grep 'keepalive_timeout 0' $unicorn_config
	rainbows -D env.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin 'check server responds with Connection: close' && {
	curl -sSfi http://$listen/ | grep 'Connection: close'
}

t_begin "send keepalive response that does not expect close" && {
	req='GET / HTTP/1.1\r\nHost: example.com\r\n\r\n'
	t0=$(unix_time)
	(
		cat $fifo > $tmp &
		printf "$req"
		wait
		unix_time > $ok
	) | socat - TCP:$listen > $fifo
	now="$(cat $ok)"
	elapsed=$(( $now - $t0 ))
	t_info "elapsed=$elapsed (expecting <=3)"
	test $elapsed -le 3
}

t_begin "'Connection: close' header set" && {
	grep 'Connection: close' $tmp
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/t0014-config-conflict.sh0000755000004100000410000000157012641135250020006 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
t_plan 6 "config variables conflict with preload_app"

t_begin "setup and start" && {
	rainbows_setup
	rtmpfiles ru

	cat > $ru <<\EOF
use Rack::ContentLength
use Rack::ContentType, "text/plain"
config = ru = { "hello" => "world" }
run lambda { |env| [ 200, {}, [ ru.inspect << "\n" ] ] }
EOF
	echo 'preload_app true' >> $unicorn_config
	rainbows -D -c $unicorn_config $ru
	rainbows_wait_start
}

t_begin "hit with curl" && {
	out=$(curl -sSf http://$listen/)
	test x"$out" = x'{"hello"=>"world"}'
}

t_begin "modify rackup file" && {
	ed -s $ru <"WORLD"}'
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_done
rainbows-5.0.0/t/t0031-close-pipe-response.sh0000755000004100000410000000420412641135250020632 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll

t_plan 16 "close pipe response for $model"

t_begin "setup and startup" && {
	rtmpfiles err out http_fifo sub_ok
	rainbows_setup $model
	export fifo
	rainbows -E none -D close-pipe-response.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "read random blob sha1" && {
	random_blob_sha1=$(rsha1 < random_blob)
}

t_begin "start FIFO reader" && {
	cat $fifo > $out &
}

t_begin "single request matches" && {
	sha1=$(curl -sSfv 2> $err http://$listen/ | rsha1)
	test -n "$sha1"
	test x"$sha1" = x"$random_blob_sha1"
}

t_begin "body.close called" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "start FIFO reader for abortive HTTP/1.1 request" && {
	cat $fifo > $out &
}

t_begin "send abortive HTTP/1.1 request" && {
	rm -f $ok
	(
		printf 'GET /random_blob HTTP/1.1\r\nHost: example.com\r\n\r\n'
		dd bs=4096 count=1 < $http_fifo >/dev/null
		echo ok > $ok
	) | socat - TCP:$listen > $http_fifo || :
	test xok = x$(cat $ok)
}

t_begin "body.close called for aborted HTTP/1.1 request" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "start FIFO reader for abortive HTTP/1.0 request" && {
	cat $fifo > $out &
}

t_begin "send abortive HTTP/1.0 request" && {
	rm -f $ok
	(
		printf 'GET /random_blob HTTP/1.0\r\n\r\n'
		dd bs=4096 count=1 < $http_fifo >/dev/null
		echo ok > $ok
	) | socat - TCP:$listen > $http_fifo || :
	test xok = x$(cat $ok)
}

t_begin "body.close called for aborted HTTP/1.0 request" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "start FIFO reader for abortive HTTP/0.9 request" && {
	cat $fifo > $out &
}

t_begin "send abortive HTTP/0.9 request" && {
	rm -f $ok
	(
		printf 'GET /random_blob\r\n'
		dd bs=4096 count=1 < $http_fifo >/dev/null
		echo ok > $ok
	) | socat - TCP:$listen > $http_fifo || :
	test xok = x$(cat $ok)
}

t_begin "body.close called for aborted HTTP/0.9 request" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "shutdown server" && {
	kill -QUIT $rainbows_pid
}

t_begin "check stderr" && check_stderr

t_done
rainbows-5.0.0/t/t0002-parser-error.sh0000755000004100000410000000111212641135250017352 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
t_plan 5 "parser error test for $model"

t_begin "setup and startup" && {
	rainbows_setup $model
	rainbows -D env.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "send request" && {
	(
		printf 'GET / HTTP/1/1\r\nHost: example.com\r\n\r\n'
		cat $fifo > $tmp &
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
	test xok = x$(cat $ok)
}

dbgcat tmp

t_begin "response should be a 400" && {
	grep -F 'HTTP/1.1 400 Bad Request' $tmp
}

t_begin "server stderr should be clean" && check_stderr

t_begin "term signal sent" && kill $rainbows_pid

t_done
rainbows-5.0.0/t/async_sinatra.ru0000644000004100000410000000043012641135250017035 0ustar  www-datawww-data# See http://github.com/raggi/async_sinatra
# gem install async_sinatra -v0.1.5
require 'sinatra/async'

class AsyncTest < Sinatra::Base
  register Sinatra::Async

  aget '/:n' do |n|
    EM.add_timer(n.to_i) { body { "delayed for #{n} seconds\n" } }
  end
end

run AsyncTest.new
rainbows-5.0.0/t/t0027-nil-copy_stream.sh0000644000004100000410000000242312641135250020046 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
test -r random_blob || die "random_blob required, run with 'make $0'"
check_copy_stream

t_plan 7 "large file 'copy_stream nil' test for $model"

t_begin "setup and startup" && {
	rtmpfiles curl_out
	rainbows_setup $model
	cat >> $unicorn_config <$ok) | rsha1)
		test $sha1 = $random_blob_sha1
		test xok = x$(cat $ok)
	done
}

# this was a problem during development
t_begin "HTTP/1.0 test" && {
	sha1=$( (curl -0 -sSfv http://$listen/random_blob &&
	         echo ok >$ok) | rsha1)
	test $sha1 = $random_blob_sha1
	test xok = x$(cat $ok)
}

t_begin "HTTP/0.9 test" && {
	(
		printf 'GET /random_blob\r\n'
		rsha1 < $fifo > $tmp &
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
	test $(cat $tmp) = $random_blob_sha1
	test xok = x$(cat $ok)
}

t_begin "shutdown server" && {
	kill -QUIT $rainbows_pid
}

t_begin "check stderr" && check_stderr

t_done
rainbows-5.0.0/t/simple-http_Rev.ru0000644000004100000410000000034512641135250017266 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] == false && env['rainbows.model'] == :Rev
    [ 200, {}, [ env.inspect << "\n" ] ]
  else
    raise "rack.multithread is true"
  end
}
rainbows-5.0.0/t/file-wrap-to_path.ru0000644000004100000410000000100012641135250017513 0ustar  www-datawww-data# must be run without Rack::Lint since that clobbers to_path
class Wrapper < Struct.new(:app)
  def call(env)
    status, headers, body = app.call(env)
    body = Body.new(body) if body.respond_to?(:to_path)
    [ status, headers, body ]
  end

  class Body < Struct.new(:body)
    def to_path
      body.to_path
    end

    def each(&block)
      body.each(&block)
    end

    def close
      ::File.open(ENV['fifo'], 'wb') { |fp| fp.puts "CLOSING" }
    end
  end
end
use Wrapper
run Rack::File.new(Dir.pwd)
rainbows-5.0.0/t/t0024-pipelined-sendfile-response.sh0000755000004100000410000000405112641135250022334 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll

t_plan 6 "pipelined sendfile response for $model"

t_begin "setup and startup" && {
	rtmpfiles err out dd_fifo
	rainbows_setup $model
	echo 'require "sendfile"' >> $unicorn_config
	echo 'def (::IO).copy_stream(*x); abort "NO"; end' >> $unicorn_config

	# can't load Rack::Lint here since it clobbers body#to_path
	rainbows -E none -D large-file-response.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "read random blob sha1" && {
	random_blob_sha1=$(rsha1 < random_blob)
}

script='
require "digest/sha1"
require "kcar"
$stdin.binmode
expect = ENV["random_blob_sha1"]
kcar = Kcar::Response.new($stdin, {})
3.times do
	nr = 0
	status, headers, body = kcar.rack
	dig = Digest::SHA1.new
	body.each { |buf| dig << buf ; nr += buf.size }
	sha1 = dig.hexdigest
	sha1 == expect or abort "mismatch: sha1=#{sha1} != expect=#{expect}"
	body.close
end
$stdout.syswrite("ok\n")
'

t_begin "staggered pipeline of 3 HTTP requests" && {
	req='GET /random_blob HTTP/1.1\r\nHost: example.com\r\n'
	rm -f $ok
	(
		export random_blob_sha1
		$RUBY -e "$script" < $fifo >> $ok &
		printf "$req"'X-Req:0\r\n\r\n'
		exec 6>&1
		(
			dd bs=16384 count=1
			printf "$req" >&6
			dd bs=16384 count=1
			printf 'X-Req:1\r\n\r\n' >&6
			dd bs=16384 count=1
			printf "$req" >&6
			dd bs=16384 count=1
			printf 'X-Req:2\r\n' >&6
			dd bs=16384 count=1
			printf 'Connection: close\r\n\r' >&6
			dd bs=16384 count=1
			printf '\n' >&6
			cat
		) < $dd_fifo > $fifo &
		wait
		echo ok >> $ok
	) | socat - TCP:$listen > $dd_fifo
	test 2 -eq $(grep '^ok$' $ok |count_lines)
}

t_begin "pipeline 3 HTTP requests" && {
	rm -f $ok
	req='GET /random_blob HTTP/1.1\r\nHost: example.com\r\n'
	req="$req"'\r\n'"$req"'\r\n'"$req"
	req="$req"'Connection: close\r\n\r\n'
	(
		export random_blob_sha1
		$RUBY -e "$script" < $fifo >> $ok &
		printf "$req"
		wait
		echo ok >> $ok
	) | socat - TCP:$listen > $fifo
	test 2 -eq $(grep '^ok$' $ok |count_lines)
}

t_begin "shutdown server" && {
	kill -QUIT $rainbows_pid
}

t_begin "check stderr" && check_stderr

t_done
rainbows-5.0.0/t/t0015-working_directory.sh0000755000004100000410000000264312641135250020511 0ustar  www-datawww-data#!/bin/sh
if test -n "$RBX_SKIP"
then
	echo "$0 is broken under Rubinius for now"
	exit 0
fi
. ./test-lib.sh

t_plan 6 "config.ru inside alt working_directory"

t_begin "setup and start" && {
	rainbows_setup
	rtmpfiles unicorn_config_tmp
	rm -rf $t_pfx.app
	mkdir $t_pfx.app

	cat > $t_pfx.app/config.ru < $unicorn_config_tmp

	# the whole point of this exercise
	echo "working_directory '$t_pfx.app'" >> $unicorn_config_tmp

	# allows ppid to be 1 in before_fork
	echo "preload_app true" >> $unicorn_config_tmp
	cat >> $unicorn_config_tmp <<\EOF
before_fork do |server,worker|
  $master_ppid = Process.ppid # should be zero to detect daemonization
end
EOF

	mv $unicorn_config_tmp $unicorn_config

	# rely on --daemonize switch, no & or -D
	rainbows -c $unicorn_config
	rainbows_wait_start
}

t_begin "reload to avoid race condition" && {
	curl -sSf http://$listen/ >/dev/null
	kill -HUP $rainbows_pid
	test xSTART = x"$(cat $fifo)"
}

t_begin "hit with curl" && {
	body=$(curl -sSf http://$listen/)
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "response body ppid == 1 (daemonized)" && {
	test "$body" -eq 1
}

t_begin "cleanup working directory" && {
	rm -r $t_pfx.app
}

t_done
rainbows-5.0.0/t/simple-http_XEpollThreadSpawn.ru0000644000004100000410000000036212641135250022075 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] == true &&
    env['rainbows.model'] == :XEpollThreadSpawn
    [ 200, {}, [ Thread.current.inspect << "\n" ] ]
  else
    raise env.inspect
  end
}
rainbows-5.0.0/t/t0026-splice-copy_stream-byte-range.sh0000644000004100000410000000111012641135250022565 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
test -r random_blob || die "random_blob required, run with 'make $0'"
check_copy_stream
check_splice

t_plan 13 "IO::Splice.copy_stream byte range response for $model"

t_begin "setup and startup" && {
	rtmpfiles out err
	rainbows_setup $model
	cat >> $unicorn_config <> $ok &
		printf "$req"'X-Req:0\r\n\r\n'
		exec 6>&1
		(
			dd bs=16384 count=1
			printf "$req" >&6
			dd bs=16384 count=1
			printf 'X-Req:1\r\n\r\n' >&6
			dd bs=16384 count=1
			printf "$req" >&6
			dd bs=16384 count=1
			printf 'X-Req:2\r\n' >&6
			dd bs=16384 count=1
			printf 'Connection: close\r\n\r' >&6
			dd bs=16384 count=1
			printf '\n' >&6
			cat
		) < $dd_fifo > $fifo &
		wait
		echo ok >> $ok
	) | socat - TCP:$listen > $dd_fifo
	test 2 -eq $(grep '^ok$' $ok |count_lines)
}

t_begin "pipeline 3 HTTP requests" && {
	rm -f $ok
	req='GET /random_blob HTTP/1.1\r\nHost: example.com\r\n'
	req="$req"'\r\n'"$req"'\r\n'"$req"
	req="$req"'Connection: close\r\n\r\n'
	(
		export random_blob_sha1
		$RUBY -e "$script" < $fifo >> $ok &
		printf "$req"
		wait
		echo ok >> $ok
	) | socat - TCP:$listen > $fifo
	test 2 -eq $(grep '^ok$' $ok |count_lines)
}

t_begin "shutdown server" && {
	kill -QUIT $rainbows_pid
}

t_begin "check stderr" && check_stderr

t_done
rainbows-5.0.0/t/.gitignore0000644000004100000410000000007012641135250015617 0ustar  www-datawww-data/test-results-*
/bin-*
/random_blob
/.dep+*
/trash
/tmp
rainbows-5.0.0/t/t0011-close-on-exec-set.sh0000755000004100000410000000205212641135250020165 0ustar  www-datawww-data#!/bin/sh
nr=${nr-"5"}
. ./test-lib.sh
skip_models StreamResponseEpoll

t_plan 7 "ensure close-on-exec flag is set for $model"

t_begin "setup and start" && {
	rainbows_setup $model 1 1
	nr=$nr rainbows -E none -D fork-sleep.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "send keepalive req expect it to timeout in ~1s" && {
	req='GET / HTTP/1.1\r\nHost: example.com\r\n\r\n'
	t0=$(unix_time)
	(
		cat $fifo > $tmp &
		printf "$req"
		wait
		unix_time > $ok
	) | socat - TCP:$listen > $fifo
	now="$(cat $ok)"
	elapsed=$(( $now - $t0 ))
	t_info "elapsed=$elapsed (expecting >=1s)"
	test $elapsed -ge 1
}

t_begin 'sleep process is still running' && {
	sleep_pid="$(tail -1 $tmp)"
	kill -0 $sleep_pid
}

t_begin 'keepalive not unreasonably long' && {
	test $elapsed -lt $nr
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "check stderr" && {
	t_info "about to start waiting $nr seconds..."
	sleep $nr
	check_stderr
}

t_begin 'sleep process is NOT running' && {
	if kill -0 $sleep_pid
	then
		die "sleep process should've died"
	fi
}

t_done
rainbows-5.0.0/t/simple-http_FiberPool.ru0000644000004100000410000000037212641135250020413 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] == false && env['rainbows.model'] == :FiberPool
    [ 200, {}, [ Thread.current.inspect << "\n" ] ]
  else
    raise "rack.multithread is not true"
  end
}
rainbows-5.0.0/t/cramp/0000755000004100000410000000000012641135250014734 5ustar  www-datawww-datarainbows-5.0.0/t/cramp/README0000644000004100000410000000024612641135250015616 0ustar  www-datawww-dataThese examples in this directory are stolen from Cramp with only trivial
changes.  All examples in this directory retain their original license
(MIT) and copyrights.
rainbows-5.0.0/t/cramp/streaming.ru0000644000004100000410000000100412641135250017270 0ustar  www-datawww-data# based on examples/streaming.rb in git://github.com/lifo/cramp
# commit ca54f8a944ae582a0c858209daf3c74efea7d27c

# Rack::Lint does not like async + EM stuff, so disable it:
#\ -E deployment

require 'cramp'

class StreamController < Cramp::Action
  periodic_timer :send_data, :every => 1
  periodic_timer :check_limit, :every => 2

  def start
    @limit = 0
  end

  def send_data
    render ["Hello World", "\n"]
  end

  def check_limit
    @limit += 1
    finish if @limit > 1
  end

end

run StreamController
rainbows-5.0.0/t/cramp/rainsocket.ru0000644000004100000410000000107212641135250017446 0ustar  www-datawww-data# based on examples/rainsocket.ru git://github.com/lifo/cramp
# Rack::Lint does not like async + EM stuff, so disable it:
#\ -E deployment
require 'cramp'

Cramp::Websocket.backend = :rainbows

class WelcomeController < Cramp::Websocket
  periodic_timer :send_hello_world, :every => 2
  on_data :received_data

  def received_data(data)
    if data =~ /fuck/
      render "You cant say fuck in here"
      finish
    else
      render "Got your #{data}"
    end
  end

  def send_hello_world
    render("Hello from the Server!\n" * 256)
  end
end

run WelcomeController
rainbows-5.0.0/t/simple-http_Base.ru0000644000004100000410000000015012641135250017376 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env| [ 200, {}, [ env.inspect << "\n" ] ] }
rainbows-5.0.0/t/t0000-simple-http.sh0000755000004100000410000000600412641135250017200 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
t_plan 25 "simple HTTP connection keepalive/pipelining tests for $model"

t_begin "checking for config.ru for $model" && {
	tbase=simple-http_$model.ru
	test -f "$tbase"
}

t_begin "setup and start" && {
	rainbows_setup
	rainbows -D $tbase -c $unicorn_config
	rainbows_wait_start
}

t_begin "pid file exists" && {
	test -f $pid
}

t_begin "single request" && {
	curl -sSfv http://$listen/
}

t_begin "handles client EOF gracefully" && {
	printf 'GET / HTTP/1.1\r\nHost: example.com\r\n\r\n' | \
		socat - TCP4:$listen > $tmp
	dbgcat tmp
	if grep 'HTTP.* 500' $tmp
	then
		die "500 error returned on client shutdown(SHUT_WR)"
	fi
	check_stderr
}

dbgcat r_err

t_begin "two requests with keepalive" && {
	curl -sSfv http://$listen/a http://$listen/b > $tmp 2>&1
}

dbgcat r_err
dbgcat tmp

t_begin "reused existing connection" && {
	grep 'Re-using existing connection' < $tmp
}

t_begin "pipelining partial requests" && {
	req='GET / HTTP/1.1\r\nHost: example.com\r\n'
	(
		cat $fifo > $tmp &
		printf "$req"'\r\n'"$req"
		sleep 1
		printf 'Connection: close\r\n\r\n'
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
}
dbgcat tmp

t_begin "two HTTP/1.1 responses" && {
	test 2 -eq $(grep '^HTTP/1.1' $tmp | count_lines)
}

t_begin "two HTTP/1.1 200 OK responses" && {
	test 2 -eq $(grep '^HTTP/1.1 200 OK' $tmp | count_lines)
}

t_begin 'one "Connection: keep-alive" response' && {
	test 1 -eq $(grep '^Connection: keep-alive' $tmp | count_lines)
}

t_begin 'one "Connection: close" response' && {
	test 1 -eq $(grep '^Connection: close' $tmp | count_lines)
}

t_begin 'check subshell success' && {
	test x"$(cat $ok)" = xok
}


t_begin "check stderr" && {
	check_stderr
}

t_begin "burst pipelining requests" && {
	req='GET / HTTP/1.1\r\nHost: example.com\r\n'
	(
		cat $fifo > $tmp &
		printf "$req"'\r\n'"$req"'Connection: close\r\n\r\n'
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
}

dbgcat tmp
dbgcat r_err

t_begin "got 2 HTTP/1.1 responses from pipelining" && {
	test 2 -eq $(grep '^HTTP/1.1' $tmp | count_lines)
}

t_begin "got 2 HTTP/1.1 200 OK responses" && {
	test 2 -eq $(grep '^HTTP/1.1 200 OK' $tmp | count_lines)
}

t_begin "one keepalive connection" && {
	test 1 -eq $(grep '^Connection: keep-alive' $tmp | count_lines)
}

t_begin "second request closes connection" && {
	test 1 -eq $(grep '^Connection: close' $tmp | count_lines)
}

t_begin "subshell exited correctly" && {
	test x"$(cat $ok)" = xok
}

t_begin "stderr log has no errors" && {
	check_stderr
}

t_begin "HTTP/0.9 request should not return headers" && {
	(
		printf 'GET /\r\n'
		cat $fifo > $tmp &
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
}

dbgcat tmp
dbgcat r_err

t_begin "env.inspect should've put everything on one line" && {
	test 1 -eq $(count_lines < $tmp)
}

t_begin "no headers in output" && {
	if grep ^Connection: $tmp
	then
		die "Connection header found in $tmp"
	elif grep ^HTTP/ $tmp
	then
		die "HTTP/ found in $tmp"
	fi
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_done
rainbows-5.0.0/t/t0018-reload-restore-settings.sh0000644000004100000410000000213712641135250021530 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
t_plan 8 "reload restore settings for $model"

t_begin "setup and start" && {
	rtmpfiles orig_config
	rainbows_setup
	cat $unicorn_config > $orig_config
	rainbows -D -c $unicorn_config -l $listen env.ru
	rainbows_wait_start
}

t_begin "HTTP request confirms we're running the correct model" && {
	curl -sSfv http://$listen/ | grep "\"rainbows.model\"=>:$model"
}

t_begin "clobber config and reload" && {
	# assumed minimal config (the original heredoc body was truncated):
	# clobbering the Rainbows!-specific settings makes the server fall
	# back to the default :Base concurrency model on reload
	cat > $unicorn_config <<EOF
pid "$pid"
stderr_path "$r_err"
stdout_path "$r_out"
EOF
	kill -HUP $rainbows_pid
	rainbows_wait_start
	wait_for_reload
	wait_for_reap
}

t_begin "HTTP request confirms we're on the default :Base model" && {
	curl -sSfv http://$listen/ | \
	  grep "\"rainbows.model\"=>:Base" >/dev/null
}

t_begin "restore config and reload" && {
	cat $orig_config > $unicorn_config
	> $r_err
	kill -HUP $rainbows_pid
	rainbows_wait_start
	wait_for_reload
	wait_for_reap
}

t_begin "HTTP request confirms we're back on the correct model" && {
	curl -sSfv http://$listen/ | \
	  grep "\"rainbows.model\"=>:$model" >/dev/null
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/t0023-sendfile-byte-range.sh0000755000004100000410000000127312641135250020566 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
test -r random_blob || die "random_blob required, run with 'make $0'"
case $RUBY_ENGINE in
ruby) ;;
*)
	t_info "skipping $T since it can't load the sendfile gem, yet"
	exit 0
	;;
esac

skip_models EventMachine NeverBlock

t_plan 13 "sendfile byte range response for $model"

t_begin "setup and startup" && {
	rtmpfiles out err
	rainbows_setup $model
	echo 'require "sendfile"' >> $unicorn_config
	echo 'def (::IO).copy_stream(*x); abort "NO"; end' >> $unicorn_config

	# can't load Rack::Lint here since it clobbers body#to_path
	rainbows -E none -D large-file-response.ru -c $unicorn_config
	rainbows_wait_start
}

. ./byte-range-common.sh
rainbows-5.0.0/t/async-response-no-autochunk.ru0000644000004100000410000000106412641135250021565 0ustar  www-datawww-datause Rack::Chunked
use Rainbows::DevFdResponse
script_chunked = <<-EOF
for i in 0 1 2 3 4 5 6 7 8 9
do
	printf '1\r\n%s\r\n' $i
	sleep 1
done
printf '0\r\n\r\n'
EOF

script_identity = <<-EOF
for i in 0 1 2 3 4 5 6 7 8 9
do
	printf $i
	sleep 1
done
EOF

run lambda { |env|
  env['rainbows.autochunk'] = false
  headers = { 'Content-Type' => 'text/plain' }

  script = case env["HTTP_VERSION"]
  when nil, "HTTP/1.0"
    script_identity
  else
    headers['Transfer-Encoding'] = 'chunked'
    script_chunked
  end

  [ 200, headers, IO.popen(script, 'rb') ].freeze
}
rainbows-5.0.0/t/t0041-optional-pool-size.sh0000755000004100000410000000172012641135250020503 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh

case $model in
NeverBlock|CoolioThreadPool|XEpollThreadPool) ;;
*)
	t_info "skipping $model.$T since it doesn't support :pool_size"
	exit
	;;
esac

t_plan 6 "optional :pool_size argument for $model"

t_begin "setup and startup" && {
	rtmpfiles curl_out curl_err
	rainbows_setup $model
}

t_begin "fails with bad :pool_size" && {
	# assumed ed script (the original heredoc was truncated): hand the
	# concurrency model a bogus, negative :pool_size argument
	ed -s $unicorn_config <<EOF
,s/use :$model/use :$model, :pool_size => -666/
w
EOF
	grep "pool_size" $unicorn_config
	rainbows -D env.ru -c $unicorn_config || echo err=$? > $ok
	test x"$(cat $ok)" = "xerr=1"
}

t_begin "starts with correct :pool_size" && {
	# assumed ed script (the original heredoc was truncated): replace
	# the bogus :pool_size with a small, valid value
	ed -s $unicorn_config <<EOF
,s/:pool_size => -666/:pool_size => 6/
w
EOF
	grep "pool_size" $unicorn_config
	rainbows -D env.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "regular TCP request works right" && {
	curl -sSfv http://$listen/
}

t_begin "no errors in stderr" && {
	check_stderr
}

t_begin "shutdown" && {
	kill $rainbows_pid
}

t_done
rainbows-5.0.0/t/t0202-async-response-one-oh.sh0000755000004100000410000000225012641135250021071 0ustar  www-datawww-data#!/bin/sh
CONFIG_RU=${CONFIG_RU-'async-response.ru'}
. ./test-lib.sh

skip_models Base WriterThreadPool WriterThreadSpawn
skip_models StreamResponseEpoll

t_plan 6 "async HTTP/1.0 response for $model"

t_begin "setup and start" && {
	rainbows_setup
	rtmpfiles a b c a_err b_err c_err
	# can't load Rack::Lint here since it'll cause Rev to slurp
	rainbows -E none -D $CONFIG_RU -c $unicorn_config
	rainbows_wait_start
}

t_begin "send async requests off in parallel" && {
	t0=$(unix_time)
	curl="curl -0 --no-buffer -vsSf http://$listen/"
	( $curl 2>> $a_err | tee $a) &
	( $curl 2>> $b_err | tee $b) &
	( $curl 2>> $c_err | tee $c) &
	wait
	t1=$(unix_time)
}

t_begin "ensure elapsed requests were processed in parallel" && {
	elapsed=$(( $t1 - $t0 ))
	echo "elapsed=$elapsed < 30"
	test $elapsed -lt 30
}

t_begin "termination signal sent" && {
	kill $rainbows_pid
}

dbgcat a
dbgcat b
dbgcat c
dbgcat a_err

t_begin "no errors from curl" && {
	if grep -i Transfer-Encoding $a_err $b_err $c_err
	then
		die "Unexpected Transfer-Encoding: header"
	fi
	for i in $a_err $b_err $c_err
	do
		grep 'Connection: close' $i
	done
}

dbgcat r_err
t_begin "no errors in stderr" && check_stderr

t_done
rainbows-5.0.0/t/simple-http_FiberSpawn.ru0000644000004100000410000000037312641135250020573 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] == false && env['rainbows.model'] == :FiberSpawn
    [ 200, {}, [ Thread.current.inspect << "\n" ] ]
  else
    raise "rack.multithread is not true"
  end
}
rainbows-5.0.0/t/t0700-app-deferred.sh0000755000004100000410000000214412641135250017300 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
case $model in
EventMachine) ;;
*)
	t_info "skipping $T since it's not compatible with $model"
	exit 0
	;;
esac

t_plan 5 "basic test for app.deferred? usage"

CONFIG_RU=app_deferred.ru

t_begin "setup and start" && {
	rainbows_setup
	rtmpfiles deferred_err deferred_out sync_err sync_out
	rainbows -D -c $unicorn_config $CONFIG_RU
	rainbows_wait_start
}

t_begin "synchronous requests run in the same thread" && {
	curl --no-buffer -sSf http://$listen/ >> $sync_out 2>> $sync_err &
	curl --no-buffer -sSf http://$listen/ >> $sync_out 2>> $sync_err &
	curl --no-buffer -sSf http://$listen/ >> $sync_out 2>> $sync_err &
	wait
	test ! -s $sync_err
	test 3 -eq "$(count_lines < $sync_out)"
	test 1 -eq "$(uniq < $sync_out | count_lines)"
}

t_begin "deferred requests run in a different thread" && {
	curl -sSf http://$listen/deferred >> $deferred_out 2>> $deferred_err
	test ! -s $deferred_err
	sync_thread="$(uniq < $sync_out)"
	test x"$(uniq < $deferred_out)" != x"$sync_thread"
}

t_begin "termination signal sent" && {
	kill $rainbows_pid
}

t_begin "no errors in stderr" && check_stderr

t_done
rainbows-5.0.0/t/t0012-spurious-wakeups-quiet.sh0000755000004100000410000000143512641135250021433 0ustar  www-datawww-data#!/bin/sh
nr=${nr-4}
. ./test-lib.sh

# ApacheBench (ab) is commonly installed in the sbin paths in Debian-based
# systems...
AB="$(which ab 2>/dev/null || :)"
if test -z "$AB"
then
	AB=$(PATH=/usr/local/sbin:/usr/sbin:$PATH which ab 2>/dev/null || :)
fi

if test -z "$AB"
then
	t_info "skipping $T since 'ab' could not be found"
	exit 0
fi

t_plan 4 "quiet spurious wakeups for $model"

t_begin "setup and start" && {
	rainbows_setup $model
	echo "preload_app true" >> $unicorn_config
	echo "worker_processes $nr" >> $unicorn_config
	rainbows -D env.ru -c $unicorn_config -E none
	rainbows_wait_start
}

t_begin "spam the server with requests" && {
	$AB -c1 -n100 http://$listen/
}

t_begin "killing succeeds" && {
	kill -QUIT $rainbows_pid
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/byte-range-common.sh0000644000004100000410000001012412641135250017507 0ustar  www-datawww-datat_begin "byte-range setup vars" && {
	random_blob_size=$(count_bytes < random_blob)
	rb_1=$(( $random_blob_size - 1 ))
	range_head=-r-365
	range_tail=-r155-
	range_mid=-r200-300
	range_n1=-r0-$rb_1
	range_n2=-r0-$(($rb_1 - 1))
	range_1b_head=-r0-0
	range_1b_tail=-r$rb_1-$rb_1
	range_1b_mid=-r200-200
	range_all=-r0-$random_blob_size
	url=http://$listen/random_blob
}

check_content_range () {
	grep '^< HTTP/1\.1 206 Partial Content' $err
	grep 'Range:' $err
	# Content-Range: bytes #{offset}-#{offset+count-1}/#{clen}
	d='\([0-9]\+\)'
	start= end= size=
	eval $(< $err sed -n -e \
	  "s/^< Content-Range: bytes $d-$d\/$d"'.*$/start=\1 end=\2 size=\3/p')
	test -n "$start"
	test -n "$end"
	test -n "$size"

	# ensure we didn't screw up the sed invocation
	expect="< Content-Range: bytes $start-$end/$size"
	test x"$(grep -F "$expect" $err)" = x"$(grep '^< Content-Range:' $err)"

	test $start -le $end
	test $end -lt $size
}

t_begin "read random blob sha1s" && {
	sha1_head=$(curl -sSff $range_head file://random_blob | rsha1)
	sha1_tail=$(curl -sSff $range_tail file://random_blob | rsha1)
	sha1_mid=$(curl -sSff $range_mid file://random_blob | rsha1)
	sha1_n1=$(curl -sSff $range_n1 file://random_blob | rsha1)
	sha1_n2=$(curl -sSff $range_n2 file://random_blob | rsha1)
	sha1_1b_head=$(curl -sSff $range_1b_head file://random_blob | rsha1)
	sha1_1b_tail=$(curl -sSff $range_1b_tail file://random_blob | rsha1)
	sha1_1b_mid=$(curl -sSff $range_1b_mid file://random_blob | rsha1)
	sha1_all=$(rsha1 < random_blob)
	echo "$sha1_all=$sha1_n1"
}

t_begin "normal full request matches" && {
	sha1="$(curl -v 2>$err -sSf $url | rsha1)"
	test x"$sha1_all" = x"$sha1"
	grep 'Content-Range:' $err && die "Content-Range unexpected"
	grep 'HTTP/1.1 200 OK' $err || die "200 response expected"
}

t_begin "crazy offset goes over" && {
	range_insane=-r$(($random_blob_size * 2))-$(($random_blob_size * 4))
	curl -vsS 2>$err $range_insane $url >/dev/null
	grep '^< HTTP/1\.[01] 416 ' $err || die "expected 416 error"
	grep '^< Content-Range: bytes \*/'$random_blob_size $err || \
          die "expected Content-Range: bytes */SIZE"
}

t_begin "keepalive/pipelining is supported on 416 responses" && {
	rm -f $tmp
	(
		cat $fifo > $tmp &
		printf 'GET /byte-range-common.sh HTTP/1.1\r\n'
		printf 'Host: %s\r\n' $listen
		printf 'Range: bytes=9999999999-9999999999\r\n\r\n'
		printf 'GET /byte-range-common.sh HTTP/1.1\r\n'
		printf 'Host: %s\r\n' $listen
		printf 'Connection: close\r\n'
		printf 'Range: bytes=0-0\r\n\r\n'
		wait
	) | socat - TCP:$listen > $fifo

	< $tmp awk '
/^HTTP\/1\.1 / && NR == 1 && $2 == 416 { first = $2 }
/^HTTP\/1\.1 / && NR != 1 && $2 == 206 { second = $2 }
END { exit((first == 416 && second == 206) ? 0 : 1) }
	'
}

t_begin "full request matches with explicit ranges" && {
	sha1="$(curl -v 2>$err $range_all -sSf $url | rsha1)"
	check_content_range
	test x"$sha1_all" = x"$sha1"

	sha1="$(curl -v 2>$err $range_n1 -sSf $url | rsha1)"
	check_content_range
	test x"$sha1_all" = x"$sha1"

	range_over=-r0-$(($random_blob_size * 2))
	sha1="$(curl -v 2>$err $range_over -sSf $url | rsha1)"
	check_content_range
	test x"$sha1_all" = x"$sha1"
}

t_begin "no fence post errors" && {
	sha1="$(curl -v 2>$err $range_n2 -sSf $url | rsha1)"
	check_content_range
	test x"$sha1_n2" = x"$sha1"

	sha1="$(curl -v 2>$err $range_1b_head -sSf $url | rsha1)"
	check_content_range
	test x"$sha1_1b_head" = x"$sha1"

	sha1="$(curl -v 2>$err $range_1b_tail -sSf $url | rsha1)"
	check_content_range
	test x"$sha1_1b_tail" = x"$sha1"

	sha1="$(curl -v 2>$err $range_1b_mid -sSf $url | rsha1)"
	check_content_range
	test x"$sha1_1b_mid" = x"$sha1"
}

t_begin "head range matches" && {
	sha1="$(curl -sSfv 2>$err $range_head $url | rsha1)"
	check_content_range
	test x"$sha1_head" = x"$sha1"
}

t_begin "tail range matches" && {
	sha1="$(curl -sSfv 2>$err $range_tail $url | rsha1)"
	check_content_range
	test x"$sha1_tail" = x"$sha1"
}

t_begin "mid range matches" && {
	sha1="$(curl -sSfv 2>$err $range_mid $url | rsha1)"
	check_content_range
	test x"$sha1_mid" = x"$sha1"
}

t_begin "shutdown server" && {
	kill -QUIT $rainbows_pid
}

t_begin "check stderr" && check_stderr

t_done
rainbows-5.0.0/t/t0006-process-rack-env.sh0000755000004100000410000000160012641135250020117 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh

t_plan 4 'ensure ENV["RACK_ENV"] is set correctly for '$model

finish_checks () {
	kill $rainbows_pid
	test ! -s $curl_err
	check_stderr
}

t_begin "setup" && {
	rtmpfiles curl_out curl_err
}

t_begin "default RACK_ENV is 'development'" && {
	rainbows_setup
	rainbows -D -c $unicorn_config env_rack_env.ru
	rainbows_wait_start
	test x"$(curl -sSf http://$listen 2>$curl_err)" = x"development"
	finish_checks
}

t_begin "RACK_ENV from process ENV is inherited" && {
	rainbows_setup
	( RACK_ENV=production rainbows -D -c $unicorn_config env_rack_env.ru )
	rainbows_wait_start
	test x$(curl -sSf http://$listen 2>$curl_err) = x"production"
	finish_checks
}

t_begin "RACK_ENV from -E is set" && {
	rainbows_setup
	rainbows -D -c $unicorn_config -E none env_rack_env.ru
	rainbows_wait_start
	test x$(curl -sSf http://$listen 2>$curl_err) = x"none"
	finish_checks
}

t_done
rainbows-5.0.0/t/t9100-thread-timeout.sh0000755000004100000410000000215412641135250017701 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
check_threaded_app_dispatch

t_plan 6 "ThreadTimeout Rack middleware test for $model"

t_begin "configure and start" && {
	rtmpfiles curl_err
	rainbows_setup
	rainbows -D t9100.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "normal request should not timeout" && {
	test x"HI" = x"$(curl -sSf http://$listen/ 2>> $curl_err)"
}

t_begin "sleepy request times out with 408" && {
	rm -f $ok
	curl -sSf http://$listen/2 2>> $curl_err || > $ok
	test -e $ok
	grep 408 $curl_err
}

t_begin "short requests do not timeout while making a long one" && {
	rm -f $ok $curl_err
	> $ok
	curl -sSf http://$listen/2 2>$curl_err >/dev/null &
	(
		for i in $(awk 'BEGIN { for(i=0;i<20;++i) print i }' < /dev/null)
		do
			curl -sSf http://$listen/0.1 >> $ok 2>&1 &
			test x"HI" = x"$(curl -sSf http://$listen/0.05)"
		done
		wait
	)
	test x"HI" = x"$(curl -sSf http://$listen/)"
	wait
	test -f $ok
	test 20 -eq $(grep '^HI$' $ok | count_lines)
	test x = x"$(grep -v '^HI$' $ok)"
	grep 408 $curl_err
}

t_begin "kill server" && {
	kill $rainbows_pid
}

t_begin "no errors in Rainbows! stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/t0401-em-async-tailer.sh0000755000004100000410000000271012641135250017731 0ustar  www-datawww-data#!/bin/sh
nr=${nr-5}
. ./test-lib.sh
case $model in
NeverBlock|EventMachine) ;;
*)
	t_info "skipping $T since it's not compatible with $model"
	exit 0
	;;
esac

t_plan 8 "async_tailer test for test for EM"

CONFIG_RU=async_examples/async_tailer.ru

t_begin "setup and start" && {
	rainbows_setup
	rtmpfiles a b c curl_err TAIL_LOG_FILE expect

	printf '

Async Tailer

' >> $expect

	export TAIL_LOG_FILE

	# this does not support Rack::Lint
	rainbows -E deployment -D $CONFIG_RU -c $unicorn_config
	rainbows_wait_start
}

t_begin "send async requests off in parallel" && {
	t0=$(unix_time)
	curl --no-buffer -sSf http://$listen/ > $a 2>> $curl_err &
	curl_a=$!
	curl --no-buffer -sSf http://$listen/ > $b 2>> $curl_err &
	curl_b=$!
	curl --no-buffer -sSf http://$listen/ > $c 2>> $curl_err &
	curl_c=$!
}

t_begin "generate log output" && {
	for i in $(awk "BEGIN {for(i=0;i<$nr;i++) print i}" < /dev/null)
	do
		date >> $TAIL_LOG_FILE
		sleep 1
	done
	# sometimes tail(1) can be slow
	sleep 2
}

t_begin "kill curls and wait for termination" && {
	kill $curl_a $curl_b $curl_c
	wait
	t1=$(unix_time)
	elapsed=$(( $t1 - $t0 ))
	t_info "elapsed=$elapsed"
}

t_begin "termination signal sent" && {
	kill -QUIT $rainbows_pid
}

t_begin "no errors from curl" && {
	test ! -s $curl_err
}

t_begin "no errors in stderr" && check_stderr

t_begin "responses match expected" && {
	cat $TAIL_LOG_FILE >> $expect
	cmp $expect $a
	cmp $expect $b
	cmp $expect $c
}

t_done
rainbows-5.0.0/t/simple-http_XEpollThreadPool.ru0000644000004100000410000000036112641135250021715 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] == true &&
    env['rainbows.model'] == :XEpollThreadPool
    [ 200, {}, [ Thread.current.inspect << "\n" ] ]
  else
    raise env.inspect
  end
}
rainbows-5.0.0/t/write-on-close.ru0000644000004100000410000000037312641135250017054 0ustar  www-datawww-dataclass WriteOnClose
  def each(&block)
    @callback = block
  end

  def close
    @callback.call "7\r\nGoodbye\r\n0\r\n\r\n"
  end
end
use Rack::ContentType, "text/plain"
run(lambda { |_| [ 200, [%w(Transfer-Encoding chunked)], WriteOnClose.new ] })
rainbows-5.0.0/t/t9001.ru0000644000004100000410000000027612641135250014764 0ustar  www-datawww-datause Rainbows::Sendfile
run lambda { |env|
  path = "#{Dir.pwd}/random_blob"
  [ 200,
    {
      'X-Sendfile' => path,
      'Content-Type' => 'application/octet-stream'
    },
    []
  ]
}
rainbows-5.0.0/t/async_chunk_app.ru0000644000004100000410000000315412641135250017352 0ustar  www-datawww-data# based on async_examples/async_app.ru by James Tucker
class DeferrableChunkBody
  include EventMachine::Deferrable

  def call(*body)
    body.each do |chunk|
      @body_callback.call("#{chunk.size.to_s(16)}\r\n")
      @body_callback.call(chunk)
      @body_callback.call("\r\n")
    end
  end

  def each(&block)
    @body_callback = block
  end

  def finish
    @body_callback.call("0\r\n\r\n")
  end
end if defined?(EventMachine)

class AsyncChunkApp
  def call(env)
    headers = {
      'Content-Type' => 'text/plain',
      'Transfer-Encoding' => 'chunked',
    }
    delay = env["HTTP_X_DELAY"].to_i

    case env["rainbows.model"]
    when :EventMachine, :NeverBlock
      body = DeferrableChunkBody.new
      body.callback { body.finish }
      task = lambda {
        env['async.callback'].call([ 200, headers, body ])
        EM.add_timer(1) {
          body.call "Hello "

          EM.add_timer(1) {
            body.call "World #{env['PATH_INFO']}\n"
            body.succeed
          }
        }
      }
      delay == 0 ? EM.next_tick(&task) : EM.add_timer(delay, &task)
    when :Coolio
      # Cool.io only does one-shot responses due to the lack of the
      # equivalent of EM::Deferrables
      body = [ "Hello ", "World #{env['PATH_INFO']}\n", '' ].map do |chunk|
        "#{chunk.size.to_s(16)}\r\n#{chunk}\r\n"
      end

      next_tick = Coolio::TimerWatcher.new(delay, false)
      next_tick.on_timer { env['async.callback'].call([ 200, headers, body ]) }
      next_tick.attach(Coolio::Loop.default)
    else
      raise "Not supported: #{env['rainbows.model']}"
    end
    nil
  end
end
run AsyncChunkApp.new
rainbows-5.0.0/t/t0102-rack-input-short.sh0000755000004100000410000000134412641135250020151 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
test -r random_blob || die "random_blob required, run with 'make $0'"

t_plan 4 "rack.input short read tests"

t_begin "setup and startup" && {
	rtmpfiles curl_out curl_err
	rainbows_setup $model
	rainbows -D sha1-random-size.ru -c $unicorn_config
	blob_sha1=$(rsha1 < random_blob)
	t_info "blob_sha1=$blob_sha1"
	rainbows_wait_start
}

t_begin "regular request" && {
	curl -sSf -T random_blob http://$listen/ > $curl_out 2> $curl_err
	test x$blob_sha1 = x$(cat $curl_out)
	test ! -s $curl_err
}

t_begin "chunked request" && {
	curl -sSf -T- < random_blob http://$listen/ > $curl_out 2> $curl_err
	test x$blob_sha1 = x$(cat $curl_out)
	test ! -s $curl_err
}

t_begin "shutdown" && {
	kill $rainbows_pid
}

t_done
rainbows-5.0.0/t/simple-http_ThreadSpawn.ru0000644000004100000410000000036312641135250020752 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] && env['rainbows.model'] == :ThreadSpawn
    [ 200, {}, [ Thread.current.inspect << "\n" ] ]
  else
    raise "rack.multithread is not true"
  end
}
rainbows-5.0.0/t/t0002-graceful.sh0000755000004100000410000000122412641135250016523 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh

t_plan 4 "graceful exit test for $model"

t_begin "setup and startup" && {
	rtmpfiles curl_out
	rainbows_setup $model
	rainbows -D sleep.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "send a request and SIGQUIT while request is processing" && {
	# assumed upload source and URL (the original line was truncated)
	curl -sSfv -T- </dev/null http://$listen/ > $curl_out 2> $fifo &
	awk -v rainbows_pid=$rainbows_pid '
{ print $0 }
/100 Continue/ {
	print "awk: sending SIGQUIT to", rainbows_pid
	system("kill -QUIT "rainbows_pid)
}' $fifo
	wait
}

dbgcat r_err

t_begin 'response returned "Hello"' && {
	test x$(cat $curl_out) = xHello
}

t_begin 'stderr has no errors' && check_stderr

t_done
rainbows-5.0.0/t/t0100-rack-input-hammer-chunked.sh0000755000004100000410000000247712641135250021710 0ustar  www-datawww-datanr_client=${nr_client-4}
. ./test-lib.sh
test -r random_blob || die "random_blob required, run with 'make $0'"

# basically we don't trust our own implementation of content-md5-put
# nor our Ruby 1.9 knowledge nor proper use of encodings in Ruby.
# So we try to use things like curl and sha1sum that are implemented
# without the Ruby interpreter to validate our own Ruby internals.

t_plan 7 "concurrent rack.input hammer stress test (chunked)"

t_begin "setup and startup" && {
	rtmpfiles curl_out curl_err
	rainbows_setup $model
	rainbows -D sha1.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "send $nr_client concurrent requests" && {
	start=$(unix_time)
	for i in $(awk "BEGIN{for(i=0;i<$nr_client;++i) print i}" > $curl_out 2>> $curl_err
		) &
	done
	wait
	t_info elapsed=$(( $(unix_time) - $start ))
}

t_begin "kill server" && kill $rainbows_pid

t_begin "got $nr_client responses" && {
	test $nr_client -eq $(count_lines < $curl_out)
}

t_begin "all responses identical" && {
	test 1 -eq $(sort < $curl_out | uniq | count_lines)
}

t_begin "sha1 matches on-disk sha1" && {
	blob_sha1=$(rsha1 < random_blob)
	t_info blob_sha1=$blob_sha1
	test x"$blob_sha1" = x"$(sort < $curl_out | uniq)"
}

t_begin "no errors in stderr log" && check_stderr

t_done
rainbows-5.0.0/t/t0042-client_header_buffer_size.sh0000644000004100000410000000304112641135250022104 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh

t_plan 8 "client_header_buffer_size tests for $model"

t_begin "setup and startup" && {
	rainbows_setup $model
}

t_begin "fails with zero buffer size" && {
	ed -s $unicorn_config < $ok
	test x"$(cat $ok)" = "xerr=1"
}

t_begin "fails with negative value" && {
	ed -s $unicorn_config < $ok
	test x"$(cat $ok)" = "xerr=1"
}

t_begin "fails with negative value" && {
	ed -s $unicorn_config < $ok
	test x"$(cat $ok)" = "xerr=1"
}

t_begin "starts with correct value" && {
	ed -s $unicorn_config <> $expect
	echo 'Hello World /1' >> $expect
	echo 'Hello World /2' >> $expect
}

t_begin "async.callback supports pipelining" && {
	rm -f $tmp
	t0=$(unix_time)
	(
		cat $fifo > $tmp &
		printf 'GET /0 HTTP/1.1\r\nHost: example.com\r\n\r\n'
		printf 'GET /1 HTTP/1.1\r\nHost: example.com\r\n\r\n'
		printf 'GET /2 HTTP/1.0\r\nHost: example.com\r\n\r\n'
		wait
	) | socat - TCP:$listen > $fifo
	t1=$(unix_time)
	elapsed=$(( $t1 - $t0 ))
	t_info "elapsed=$elapsed $model.$0 ($t_current)"
	test 3 -eq "$(fgrep 'HTTP/1.1 200 OK' $tmp | count_lines)"
	test 3 -eq "$(grep '^Hello ' $tmp | count_lines)"
	test 3 -eq "$(grep 'World ' $tmp | count_lines)"
}

t_begin "async.callback supports delayed pipelining" && {
	rm -f $tmp
	t0=$(unix_time)
	(
		cat $fifo > $tmp &
		printf 'GET /0 HTTP/1.1\r\nHost: example.com\r\n\r\n'
		sleep 1
		printf 'GET /1 HTTP/1.1\r\nHost: example.com\r\n\r\n'
		sleep 1
		printf 'GET /2 HTTP/1.0\r\nHost: example.com\r\n\r\n'
		wait
	) | socat - TCP:$listen > $fifo
	t1=$(unix_time)
	elapsed=$(( $t1 - $t0 ))
	t_info "elapsed=$elapsed $model.$0 ($t_current)"
	test 3 -eq "$(fgrep 'HTTP/1.1 200 OK' $tmp | count_lines)"
	test 3 -eq "$(grep '^Hello ' $tmp | count_lines)"
	test 3 -eq "$(grep 'World ' $tmp | count_lines)"
}

t_begin "async.callback supports pipelining with delay $DELAY" && {
	rm -f $tmp
	t0=$(unix_time)
	(
		cat $fifo > $tmp &
		printf 'GET /0 HTTP/1.1\r\nX-Delay: %d\r\n' $DELAY
		printf 'Host: example.com\r\n\r\n'
		printf 'GET /1 HTTP/1.1\r\nX-Delay: %d\r\n' $DELAY
		printf 'Host: example.com\r\n\r\n'
		printf 'GET /2 HTTP/1.0\r\nX-Delay: %d\r\n' $DELAY
		printf 'Host: example.com\r\n\r\n'
		wait
	) | socat - TCP:$listen > $fifo
	t1=$(unix_time)
	elapsed=$(( $t1 - $t0 ))
	min=$(( $DELAY * 3 ))
	t_info "elapsed=$elapsed $model.$0 ($t_current) min=$min"
	test $elapsed -ge $min
	test 3 -eq "$(fgrep 'HTTP/1.1 200 OK' $tmp | count_lines)"
	test 3 -eq "$(grep '^Hello ' $tmp | count_lines)"
	test 3 -eq "$(grep 'World ' $tmp | count_lines)"
}

t_begin "async.callback supports keepalive" && {
	t0=$(unix_time)
	curl -v --no-buffer -sSf http://$listen/[0-2] > $tmp 2>> $curl_err
	t1=$(unix_time)
	elapsed=$(( $t1 - $t0 ))
	t_info "elapsed=$elapsed $model.$0 ($t_current)"
	cmp $expect $tmp
	test 2 -eq "$(fgrep 'Re-using existing connection!' $curl_err |count_lines)"
	rm -f $curl_err
}

t_begin "async.callback supports keepalive with delay $DELAY" && {
	t0=$(unix_time)
	curl -v --no-buffer -sSf -H "X-Delay: $DELAY" \
	  http://$listen/[0-2] > $tmp 2>> $curl_err
	t1=$(unix_time)
	elapsed=$(( $t1 - $t0 ))
	min=$(( $DELAY * 3 ))
	t_info "elapsed=$elapsed $model.$0 ($t_current) min=$min"
	test $elapsed -ge $min
	cmp $expect $tmp
	test 2 -eq "$(fgrep 'Re-using existing connection!' $curl_err |count_lines)"
	rm -f $curl_err
}

t_begin "send async requests off in parallel" && {
	t0=$(unix_time)
	curl --no-buffer -sSf http://$listen/[0-2] > $a 2>> $curl_err &
	curl --no-buffer -sSf http://$listen/[0-2] > $b 2>> $curl_err &
	curl --no-buffer -sSf http://$listen/[0-2] > $c 2>> $curl_err &
}

t_begin "wait for curl terminations" && {
	wait
	t1=$(unix_time)
	elapsed=$(( $t1 - $t0 ))
	t_info "elapsed=$elapsed"
}

t_begin "termination signal sent" && {
	kill $rainbows_pid
}

t_begin "no errors from curl" && {
	test ! -s $curl_err
}

t_begin "no errors in stderr" && check_stderr

t_begin "responses match expected" && {
	cmp $expect $a
	cmp $expect $b
	cmp $expect $c
}

t_done

rainbows-5.0.0/t/simple-http_CoolioThreadSpawn.ru0000644000004100000410000000035312641135250022116 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] && env['rainbows.model'] == :CoolioThreadSpawn
    [ 200, {}, [ env.inspect << "\n" ] ]
  else
    raise "rack.multithread is false"
  end
}
rainbows-5.0.0/t/t9000.ru0000644000004100000410000000033412641135250014756 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
use Rainbows::AppPool, :size => ENV['APP_POOL_SIZE'].to_i
class Sleeper
  def call(env)
    Rainbows.sleep(1)
    [ 200, {}, [ "#{object_id}\n" ] ]
  end
end
run Sleeper.new
rainbows-5.0.0/t/t0800-rack-hijack.sh0000755000004100000410000000102712641135250017111 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
t_plan 5 "rack.hijack tests (Rack 1.5+ (Rack::VERSION >= [1,2]))"

t_begin "setup and start" && {
	rainbows_setup
	rainbows -D -c $unicorn_config hijack.ru
	rainbows_wait_start
}

t_begin "check request hijack" && {
	test "xrequest.hijacked" = x"$(curl -sSfv http://$listen/hijack_req)"
}

t_begin "check response hijack" && {
	test "xresponse.hijacked" = x"$(curl -sSfv http://$listen/hijack_res)"
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/simple-http_CoolioFiberSpawn.ru0000644000004100000410000000036212641135250021736 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] == false &&
    env['rainbows.model'] == :CoolioFiberSpawn
    [ 200, {}, [ Thread.current.inspect << "\n" ] ]
  else
    raise env.inspect
  end
}
rainbows-5.0.0/t/t9002.ru0000644000004100000410000000016612641135250014763 0ustar  www-datawww-datarequire 'rainbows/server_token'
require 'rack/lobster'
use Rack::Head
use Rainbows::ServerToken
run Rack::Lobster.new
rainbows-5.0.0/t/simple-http_ActorSpawn.ru0000644000004100000410000000036112641135250020611 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] && env['rainbows.model'] == :ActorSpawn
    [ 200, {}, [ Actor.current.inspect << "\n" ] ]
  else
    raise "rack.multithread is not true"
  end
}
rainbows-5.0.0/t/t0022-copy_stream-byte-range.sh0000755000004100000410000000064612641135250021324 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
test -r random_blob || die "random_blob required, run with 'make $0'"
check_copy_stream

t_plan 13 "IO.copy_stream byte range response for $model"

t_begin "setup and startup" && {
	rtmpfiles out err
	rainbows_setup $model
	# can't load Rack::Lint here since it clobbers body#to_path
	rainbows -E none -D large-file-response.ru -c $unicorn_config
	rainbows_wait_start
}

. ./byte-range-common.sh
rainbows-5.0.0/t/heartbeat-timeout.ru0000644000004100000410000000050112641135250017621 0ustar  www-datawww-datause Rack::ContentLength
headers = { 'Content-Type' => 'text/plain' }
run lambda { |env|
  case env['PATH_INFO']
  when "/block-forever"
    Process.kill(:STOP, $$)
    sleep # in case STOP signal is not received in time
    [ 500, headers, [ "Should never get here\n" ] ]
  else
    [ 200, headers, [ "#$$\n" ] ]
  end
}
rainbows-5.0.0/t/t0007-worker-follows-master-to-death.sh0000755000004100000410000000226212641135250022733 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
t_plan 7 "ensure worker follows master to death"

t_begin "setup" && {
	rtmpfiles curl_err curl_out
	rainbows_setup
	echo timeout 3 >> $unicorn_config
	rainbows -D -c $unicorn_config worker-follows-master-to-death.ru
	rainbows_wait_start
}

t_begin "read worker PID" && {
	worker_pid=$(curl -sSf http://$listen/pid)
	t_info "worker_pid=$worker_pid"
}

t_begin "start a long sleeping request" && {
	# assumed upload source and sleep duration (the original line was truncated)
	curl -sSfv -T- </dev/null http://$listen/sleep/10 > $curl_out 2> $fifo &
	curl_pid=$!
	t_info "curl_pid=$curl_pid"
}

t_begin "nuke the master once we're connected" && {
	awk -v rainbows_pid=$rainbows_pid '
{ print $0 }
/100 Continue/ {
	print "awk: sending SIGKILL to", rainbows_pid
	system("kill -9 "rainbows_pid)
}' < $fifo > $curl_err
	wait
}

t_begin "worker is no longer running" && {
	nr=30
	while kill -0 $worker_pid 2>/dev/null && test $nr -gt 0
	do
		nr=$(( $nr - 1))
		sleep 1
	done
	kill -0 $worker_pid 2> $tmp && false
	test -s $tmp
}

t_begin "sleepy curl request is no longer running" && {
	kill -0 $curl_pid 2> $tmp && false
	test -s $tmp
}

t_begin "sleepy curl request completed gracefully" && {
	test x$(cat $curl_out) = x$worker_pid
	dbgcat curl_err
}

t_done
rainbows-5.0.0/t/t0035-kgio-pipe-response.sh0000755000004100000410000000273512641135250020471 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
test -r random_blob || die "random_blob required, run with 'make $0'"

t_plan 10 "fast Kgio pipe response for $model"

t_begin "setup and startup" && {
	rtmpfiles err out
	rainbows_setup $model
	rainbows -E none -D kgio-pipe-response.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "read random blob sha1" && {
	random_blob_sha1=$(rsha1 < random_blob)
	three_sha1=$(cat random_blob random_blob random_blob | rsha1)
}

t_begin "single request matches" && {
	sha1=$(curl -sSfv 2> $err http://$listen/ | rsha1)
	test -n "$sha1"
	test x"$sha1" = x"$random_blob_sha1"
}

t_begin "Content-Length header preserved in response" && {
	grep "^< Content-Length:" $err
}

t_begin "send three keep-alive requests" && {
	sha1=$(curl -vsSf 2> $err \
	       http://$listen/ http://$listen/ http://$listen/ | rsha1)
	test -n "$sha1"
	test x"$sha1" = x"$three_sha1"
}

t_begin "ensure responses were all keep-alive" && {
	test 3 -eq $(grep '< Connection: keep-alive' < $err | count_lines)
}

t_begin "HTTP/1.0 test" && {
	sha1=$(curl -0 -v 2> $err -sSf http://$listen/ | rsha1)
	test $sha1 = $random_blob_sha1
	grep '< Connection: close' < $err
}

t_begin "HTTP/0.9 test" && {
	(
		printf 'GET /\r\n'
		rsha1 < $fifo > $tmp &
		wait
		echo ok > $ok
	) | socat - TCP:$listen > $fifo
	test $(cat $tmp) = $random_blob_sha1
	test xok = x$(cat $ok)
}

t_begin "shutdown server" && {
	kill -QUIT $rainbows_pid
}

t_begin "check stderr" && check_stderr

t_done
rainbows-5.0.0/t/close-has-env.ru0000644000004100000410000000235512641135250016653 0ustar  www-datawww-data#\ -E none
use Rainbows::DevFdResponse
class ClosablePipe < ::IO
  attr_accessor :env

  def self.new(env)
    rv = popen "echo hello", "rb"
    rv.env = env
    rv
  end

  def close
    return if closed? # idempotent for Ruby 2.3.0 compatibility
    super
    $stdout.syswrite "path_info=#{@env['PATH_INFO']}\n"
  end
end

class ClosableFile < ::File
  attr_accessor :env
  alias to_path path
  def close
    super
    $stdout.syswrite "path_info=#{@env['PATH_INFO']}\n"
  end
end

class Blob
  def initialize(env)
    @env = env
  end

  def each(&block)
    yield "BLOB\n"
  end

  def close
    $stdout.syswrite "path_info=#{@env['PATH_INFO']}\n"
  end
end

run(lambda { |env|
  case env["PATH_INFO"]
  when %r{\A/pipe/}
    [ 200,
      [ %w(Content-Length 6), %w(Content-Type text/plain)],
      ClosablePipe.new(env)
    ]
  when %r{\A/file/}
    f = ClosableFile.open("env.ru", "rb")
    f.env = env
    [ 200, {
      'X-Req-Path' => env["PATH_INFO"],
      'Content-Length' => f.stat.size.to_s,
      'Content-Type' => 'text/plain' },
      f
    ]
  when %r{\A/blob/}
    [ 200,
      [%w(Content-Length 5), %w(Content-Type text/plain)],
      Blob.new(env)
    ]
  else
    [ 404, [%w(Content-Length 0), %w(Content-Type text/plain)], [] ]
  end
})
rainbows-5.0.0/t/t0000.ru0000644000004100000410000000015012641135250014741 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env| [ 200, {}, [ env.inspect << "\n" ] ] }
rainbows-5.0.0/t/t0010-keepalive-timeout-effective.sh0000755000004100000410000000144112641135250022322 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll

t_plan 6 "keepalive_timeout tests for $model"

t_begin "setup and start" && {
	rainbows_setup
	rainbows -D env.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin 'check server up' && {
	curl -sSf http://$listen/
}

t_begin "send keepalive response that does not expect close" && {
	req='GET / HTTP/1.1\r\nHost: example.com\r\n\r\n'
	t0=$(unix_time)
	(
		cat $fifo > $tmp &
		printf "$req"
		wait
		unix_time > $ok
	) | socat - TCP:$listen > $fifo
	now="$(cat $ok)"
	elapsed=$(( $now - $t0 ))
	t_info "elapsed=$elapsed (expecting >=5s)"
	test $elapsed -ge 5
}

t_begin 'keepalive not unreasonably long' && {
	test $elapsed -lt 15
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/simple-http_RevFiberSpawn.ru0000644000004100000410000000035712641135250021252 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] == false &&
    env['rainbows.model'] == :RevFiberSpawn
    [ 200, {}, [ Thread.current.inspect << "\n" ] ]
  else
    raise env.inspect
  end
}
rainbows-5.0.0/t/simple-http_EventMachine.ru0000644000004100000410000000035612641135250021102 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] == false && env['rainbows.model'] == :EventMachine
    [ 200, {}, [ env.inspect << "\n" ] ]
  else
    raise "rack.multithread is true"
  end
}
rainbows-5.0.0/t/t0113-rewindable-input-false.sh0000755000004100000410000000114112641135250021275 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models EventMachine NeverBlock
skip_models Rev RevThreadSpawn RevThreadPool
skip_models Coolio CoolioThreadSpawn CoolioThreadPool
skip_models Epoll XEpoll

t_plan 4 "rewindable_input toggled to false"

t_begin "setup and start" && {
	rainbows_setup
	echo rewindable_input false >> $unicorn_config
	rainbows -D -c $unicorn_config t0113.ru
	rainbows_wait_start
}

t_begin "ensure worker is started" && {
	test xOK = x$(curl -T t0113.ru -H Expect: -vsSf http://$listen/)
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/t0300-async_sinatra.sh0000755000004100000410000000245612641135250017602 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh

# n - number of seconds to sleep
n=10
CONFIG_RU=async_sinatra.ru
case $model in
NeverBlock|EventMachine) ;;
*)
	t_info "skipping $T since it's not compatible with $model"
	exit 0
	;;
esac

t_plan 7 "async_sinatra test for EM"

t_begin "setup and start" && {
	rainbows_setup
	rtmpfiles a b c curl_err

	# Async Sinatra does not support Rack::Lint
	rainbows -E none -D $CONFIG_RU -c $unicorn_config
	rainbows_wait_start
}

t_begin "send async requests off in parallel" && {
	t0=$(unix_time)
	( curl --no-buffer -sSf http://$listen/$n 2>> $curl_err | tee $a) &
	( curl --no-buffer -sSf http://$listen/$n 2>> $curl_err | tee $b) &
	( curl --no-buffer -sSf http://$listen/$n 2>> $curl_err | tee $c) &
}

t_begin "ensure elapsed requests were processed in parallel" && {
	wait
	t1=$(unix_time)
	elapsed=$(( $t1 - $t0 ))
	echo "elapsed=$elapsed < 30"
	test $elapsed -lt 30
}

t_begin "termination signal sent" && {
	kill $rainbows_pid
}

dbgcat a
dbgcat b
dbgcat c
dbgcat r_err
dbgcat curl_err

t_begin "no errors from curl" && {
	test ! -s $curl_err
}

t_begin "no errors in stderr" && check_stderr

dbgcat r_err

t_begin "no responses are chunked" && {
	test x"$(cat $a)" = x"delayed for $n seconds"
	test x"$(cat $b)" = x"delayed for $n seconds"
	test x"$(cat $c)" = x"delayed for $n seconds"
}

t_done
rainbows-5.0.0/t/env.ru0000644000004100000410000000015012641135250014766 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env| [ 200, {}, [ env.inspect << "\n" ] ] }
rainbows-5.0.0/t/t0104-rack-input-limit-tiny.sh0000755000004100000410000001213412641135250021112 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
test -r random_blob || die "random_blob required, run with 'make $0'"
req_curl_chunked_upload_err_check

t_plan 18 "rack.input client_max_body_size tiny"

t_begin "setup and startup" && {
	rtmpfiles curl_out curl_err
	rainbows_setup $model
	ed -s $unicorn_config < $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	grep 413 $curl_err
	test -e $ok
}

t_begin "stops a large chunked request" && {
	rm -f $ok
	dd if=/dev/zero bs=257 count=1 | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	grep 413 $curl_err
	test -e $ok
}

t_begin "small size sha1 chunked ok" && {
	blob_sha1=b376885ac8452b6cbf9ced81b1080bfd570d9b91
	rm -f $ok
	dd if=/dev/zero bs=256 count=1 | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "small size sha1 content-length ok" && {
	blob_sha1=b376885ac8452b6cbf9ced81b1080bfd570d9b91
	rm -f $ok
	dd if=/dev/zero bs=256 count=1 of=$tmp
	curl -vsSf -T $tmp -H Expect: \
	  http://$listen/ > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "stops a regular request (gets_read_mix)" && {
	rm -f $ok
	dd if=/dev/zero bs=257 count=1 of=$tmp
	curl -vsSf -T $tmp -H Expect: \
	  http://$listen/gets_read_mix > $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	grep 413 $curl_err
	test -e $ok
}

t_begin "stops a large chunked request (gets_read_mix)" && {
	rm -f $ok
	dd if=/dev/zero bs=257 count=1 | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/gets_read_mix > $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	grep 413 $curl_err
	test -e $ok
}

t_begin "stops a large line-based chunked request (gets_read_mix)" && {
	rm -f $ok
	=0;) print "hello world"}' | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/gets_read_mix > $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	grep 413 $curl_err
	test -e $ok
}

t_begin "OK with line-based chunked request (gets_read_mix)" && {
	rm -f $ok
	=0;) print "hello world"}' | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/gets_read_mix > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test x"$(cat $curl_out)" = x23eab3cebcbe22a0456c8462e3d3bb01ae761702
}

t_begin "small size sha1 chunked ok (gets_read_mix)" && {
	blob_sha1=b376885ac8452b6cbf9ced81b1080bfd570d9b91
	rm -f $ok
	dd if=/dev/zero bs=256 count=1 | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/gets_read_mix > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "small size sha1 content-length ok (gets_read_mix)" && {
	blob_sha1=b376885ac8452b6cbf9ced81b1080bfd570d9b91
	rm -f $ok
	dd if=/dev/zero bs=256 count=1 of=$tmp
	curl -vsSf -T $tmp -H Expect: \
	  http://$listen/gets_read_mix > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "stops a regular request (each)" && {
	rm -f $ok
	dd if=/dev/zero bs=257 count=1 of=$tmp
	curl -vsSf -T $tmp -H Expect: \
	  http://$listen/each > $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	grep 413 $curl_err
	test -e $ok
}

t_begin "stops a large chunked request (each)" && {
	rm -f $ok
	dd if=/dev/zero bs=257 count=1 | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/each > $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	grep 413 $curl_err
	test -e $ok
}

t_begin "small size sha1 chunked ok (each)" && {
	blob_sha1=b376885ac8452b6cbf9ced81b1080bfd570d9b91
	rm -f $ok
	dd if=/dev/zero bs=256 count=1 | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/each > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "small size sha1 content-length ok (each)" && {
	blob_sha1=b376885ac8452b6cbf9ced81b1080bfd570d9b91
	rm -f $ok
	dd if=/dev/zero bs=256 count=1 of=$tmp
	curl -vsSf -T $tmp -H Expect: \
	  http://$listen/each > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test "$(cat $curl_out)" = $blob_sha1
}

t_begin "stops a large line-based chunked request (each)" && {
	rm -f $ok
	=0;) print "hello world"}' | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/each > $curl_out 2> $curl_err || > $ok
	dbgcat curl_err
	dbgcat curl_out
	grep 413 $curl_err
	test -e $ok
}

t_begin "OK with line-based chunked request (each)" && {
	rm -f $ok
	=0;) print "hello world"}' | \
	  curl -vsSf -T- -H Expect: \
	  http://$listen/each > $curl_out 2> $curl_err
	dbgcat curl_err
	dbgcat curl_out
	test x"$(cat $curl_out)" = x23eab3cebcbe22a0456c8462e3d3bb01ae761702
}

t_begin "shutdown" && {
	kill $rainbows_pid
}

t_done
rainbows-5.0.0/t/close-pipe-response.ru0000644000004100000410000000112712641135250020077 0ustar  www-datawww-data# must be run without Rack::Lint since that clobbers to_path
class CloseWrapper < Struct.new(:to_io)
  def each(&block)
    to_io.each(&block)
  end

  def close
    ::File.open(ENV['fifo'], 'wb') do |fp|
      fp.syswrite("CLOSING #{to_io}\n")
      if to_io.respond_to?(:close) && ! to_io.closed?
        to_io.close
      end
    end
  end
end
use Rainbows::DevFdResponse
run(lambda { |env|
  io = IO.popen('cat random_blob', 'rb')
  [ 200,
    {
      'Content-Length' => ::File.stat('random_blob').size.to_s,
      'Content-Type' => 'application/octet-stream',
    },
    CloseWrapper[io] ]
})
rainbows-5.0.0/t/t0600-rack-fiber_pool.sh0000755000004100000410000000174312641135250020003 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
case $model in
EventMachine) ;;
*)
	t_info "skipping $T since it's not compatible with $model"
	exit 0
	;;
esac

require_check rack/fiber_pool Rack::FiberPool

t_plan 7 "basic test with rack-fiber_pool gem"

CONFIG_RU=rack-fiber_pool/app.ru

t_begin "setup and start" && {
	rainbows_setup
	rtmpfiles curl_err curl_out

	rainbows -D -c $unicorn_config $CONFIG_RU
	rainbows_wait_start
}

t_begin "send requests off in parallel" && {
	curl --no-buffer -sSf http://$listen/ >> $curl_out 2>> $curl_err &
	curl --no-buffer -sSf http://$listen/ >> $curl_out 2>> $curl_err &
	curl --no-buffer -sSf http://$listen/ >> $curl_out 2>> $curl_err &
}

t_begin "wait for curl terminations" && {
	wait
}

t_begin "termination signal sent" && {
	kill $rainbows_pid
}

t_begin "no errors from curl" && {
	test ! -s $curl_err
}

t_begin "no errors in stderr" && check_stderr

t_begin "ensure we hit 3 separate fibers" && {
	test x3 = x"$(sort < $curl_out | uniq | count_lines)"
}

t_done
rainbows-5.0.0/t/worker-follows-master-to-death.ru0000644000004100000410000000045512641135250022176 0ustar  www-datawww-datause Rack::ContentLength
headers = { 'Content-Type' => 'text/plain' }
run lambda { |env|
  /\A100-continue\z/i =~ env['HTTP_EXPECT'] and return [ 100, {}, [] ]
  env['rack.input'].read

  case env["PATH_INFO"]
  when %r{/sleep/(\d+)}
    Rainbows.sleep($1.to_i)
  end
  [ 200, headers, [ "#$$\n" ] ]
}
rainbows-5.0.0/t/t0032-close-pipe-to_path-response.sh0000755000004100000410000000435112641135250022272 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll
if ! test -d /dev/fd
then
	t_info "skipping $T since /dev/fd is required"
	exit 0
fi

t_plan 16 "close pipe to_path response for $model"

t_begin "setup and startup" && {
	rtmpfiles err out http_fifo sub_ok
	rainbows_setup $model
	export fifo
	rainbows -E none -D close-pipe-to_path-response.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin "read random blob sha1" && {
	random_blob_sha1=$(rsha1 < random_blob)
}

t_begin "start FIFO reader" && {
	cat $fifo > $out &
}

t_begin "single request matches" && {
	sha1=$(curl -sSfv 2> $err http://$listen/ | rsha1)
	test -n "$sha1"
	test x"$sha1" = x"$random_blob_sha1"
}

t_begin "body.close called" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "start FIFO reader for abortive HTTP/1.1 request" && {
	cat $fifo > $out &
}

t_begin "send abortive HTTP/1.1 request" && {
	rm -f $ok
	(
		printf 'GET /random_blob HTTP/1.1\r\nHost: example.com\r\n\r\n'
		dd bs=4096 count=1 < $http_fifo >/dev/null
		echo ok > $ok
	) | socat - TCP:$listen > $http_fifo || :
	test xok = x$(cat $ok)
}

t_begin "body.close called for aborted HTTP/1.1 request" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "start FIFO reader for abortive HTTP/1.0 request" && {
	cat $fifo > $out &
}

t_begin "send abortive HTTP/1.0 request" && {
	rm -f $ok
	(
		printf 'GET /random_blob HTTP/1.0\r\n\r\n'
		dd bs=4096 count=1 < $http_fifo >/dev/null
		echo ok > $ok
	) | socat - TCP:$listen > $http_fifo || :
	test xok = x$(cat $ok)
}

t_begin "body.close called for aborted HTTP/1.0 request" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "start FIFO reader for abortive HTTP/0.9 request" && {
	cat $fifo > $out &
}

t_begin "send abortive HTTP/0.9 request" && {
	rm -f $ok
	(
		printf 'GET /random_blob\r\n'
		dd bs=4096 count=1 < $http_fifo >/dev/null
		echo ok > $ok
	) | socat - TCP:$listen > $http_fifo || :
	test xok = x$(cat $ok)
}

t_begin "body.close called for aborted HTTP/0.9 request" && {
	wait # for cat $fifo
	grep CLOSING $out || die "body.close not logged"
}

t_begin "shutdown server" && {
	kill -QUIT $rainbows_pid
}

t_begin "check stderr" && check_stderr

t_done
rainbows-5.0.0/t/t9001-sendfile-to-path.sh0000755000004100000410000000166312641135250020115 0ustar  www-datawww-data#!/bin/sh
. ./test-lib.sh
skip_models StreamResponseEpoll

t_plan 7 "Sendfile middleware test for $model"

t_begin "configure and start" && {
	rtmpfiles curl_err
	rainbows_setup

	# do not allow default middleware to be loaded since it may
	# kill body#to_path
	rainbows -E none -D t9001.ru -c $unicorn_config
	rainbows_wait_start
	random_blob_sha1=$(rsha1 < random_blob)
}

t_begin "hit with curl" && {
	sha1=$(curl -sSfv http://$listen/ 2> $curl_err | rsha1)
}

t_begin "kill server" && {
	kill $rainbows_pid
}

t_begin "SHA1 matches source" && {
	test x$random_blob_sha1 = x$sha1
}

t_begin "no errors in Rainbows! stderr" && {
	check_stderr
}

t_begin "X-Sendfile does not show up in headers" && {
	dbgcat curl_err
	if grep -i x-sendfile $curl_err
	then
		die "X-Sendfile did show up!"
	fi
}

t_begin "Content-Length is set correctly in headers" && {
	expect=$(count_bytes < random_blob)
	grep "^< Content-Length: $expect" $curl_err
}

t_done
rainbows-5.0.0/t/simple-http_WriterThreadPool.ru0000644000004100000410000000036512641135250021772 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] && env['rainbows.model'] == :WriterThreadPool
    [ 200, {}, [ Thread.current.inspect << "\n" ] ]
  else
    raise "rack.multithread is false"
  end
}
rainbows-5.0.0/t/t0019-keepalive-cpu-usage.sh0000644000004100000410000000261412641135250020600 0ustar  www-datawww-data#!/bin/sh
if test -z "$V" || test 0 -eq "$V"
then
	exit 0
fi
. ./test-lib.sh
skip_models WriterThreadSpawn WriterThreadPool Base
skip_models StreamResponseEpoll

t_plan 6 "keepalive_timeout CPU usage tests for $model"

t_begin "setup and start" && {
	rainbows_setup $model 50 666
	grep 'worker_connections 50' $unicorn_config
	grep 'keepalive_timeout 666' $unicorn_config
	rainbows -E deployment -D times.ru -c $unicorn_config
	rainbows_wait_start
}

t_begin 'read current times' && {
	eval "$(curl -sSf http://$listen/)"
	before_utime=$utime
	before_stime=$stime
	echo "utime=$utime stime=$stime"
}

t_begin 'keepalive connections' && {
	listen=$listen $RUBY -rsocket -e '
host, port = ENV["listen"].split(/:/)
port = port.to_i
socks = (1..49).map do |i|
  s = TCPSocket.new(host, port)
  # need to write something to get around deferred accepts
  s.write "GET /#{i} HTTP/1.1\r\nHost: example.com\r\n\r\n"
  s.readpartial 16384
  s
end
sleep
	' &
	ruby_pid=$!
	for i in $(awk 'BEGIN { for(i=0;i<60;++i) print i }' < /dev/null)
	do
		# assumed loop body (the original was truncated): re-sample the
		# worker CPU times once per second and log them
		sleep 1
		eval "$(curl -sSf http://$listen/)"
		t_info "utime[$i] $before_utime => $utime" \
		     "stime[$i] $before_stime => $stime"
	done
	kill $ruby_pid
}

t_begin "times not unreasonable" && {
	echo "utime: $before_utime => $utime" \
	     "stime: $before_stime => $stime"
}

t_begin "killing succeeds" && {
	kill $rainbows_pid
}

t_begin "check stderr" && {
	check_stderr
}

t_done
rainbows-5.0.0/t/simple-http_Epoll.ru0000644000004100000410000000034312641135250017603 0ustar  www-datawww-datause Rack::ContentLength
use Rack::ContentType
run lambda { |env|
  if env['rack.multithread'] == false && env['rainbows.model'] == :Epoll
    [ 200, {}, [ Thread.current.inspect << "\n" ] ]
  else
    raise env.inspect
  end
}
rainbows-5.0.0/local.mk.sample0000644000004100000410000000216712641135250016300 0ustar  www-datawww-data# this is the local.mk file used by Eric Wong on his dev boxes.
# GNUmakefile will source local.mk in the top-level source tree
# if it is present.
#
# This depends on a bunch of GNU-isms from bash, sed, touch.

DLEXT := so

# if you have a decent amount of RAM, setting TMPDIR to be on tmpfs
# can significantly improve performance because uploads take a lot
# of disk I/O due to the rewindability requirement in Rack.
# TMPDIR := /dev/shm
# export TMPDIR

# Avoid loading rubygems to speed up tests because gmake is
# fork+exec heavy with Ruby.
prefix = $(HOME)

ifeq ($(r192),)
  RUBY := $(prefix)/bin/ruby
else
  prefix := $(prefix)/ruby-1.9.2
  export PATH := $(prefix)/bin:$(PATH)
  RUBY := $(prefix)/bin/ruby --disable-gems
endif

# pipefail is THE reason to use bash (v3+) or newer revisions of ksh93
# SHELL := /bin/bash -e -o pipefail
SHELL := /bin/ksh93 -e -o pipefail

# trace execution of tests
# TRACER = strace -f -o $(t_pfx).strace -s 100000
# TRACER = /usr/bin/time -v -o $(t_pfx).time

full-test: test-18 test-192
test-18:
	$(MAKE) test 2>&1 | sed -e 's!^!1.8 !'
test-192:
	$(MAKE) test r192=T 2>&1 | sed -e 's!^!1.9.2 !'
rainbows-5.0.0/LATEST0000644000004100000410000000634412641135250014315 0ustar  www-datawww-data=== Rainbows! 5.0.0 - maintained as long as anybody uses it! / 2015-11-25 00:58 UTC

  This release syncs with unicorn 5 and drops some old compatibility
  cruft from old releases.  Performance should be roughly unchanged
  for Ruby 2.2 users while older Rubies (1.9.3 - 2.1) will see
  minor, probably unnoticeable performance regressions.

  Compatibility:

  * The horrible, proprietary (:P) "Status:" response header is
    finally gone, saving at least 16 precious bytes in every HTTP
    response.  This should make it easier to write custom HTTP clients
    which are compatible across all HTTP servers.  It will hopefully
    make migrating between different Rack servers easier for new
    projects.

  * Ruby 1.8 support removed.  Ruby 1.9.3 is currently the earliest
    supported version.  However, expect minor, likely-unnoticeable
    performance regressions if you use Ruby 2.1 or earlier.  Going
    forward, Rainbows! will favor the latest version (currently 2.2) of
    the mainline Ruby implementation, potentially sacrificing
    performance on older Rubies.

  New features:

  * sd_listen_fds(3) emulation added for systemd compatibility.
    You may now stop using PID files and other process monitoring
    software when using systemd.

  * Newly-set TCP socket options are now applied to inherited sockets.

  * Dynamic changes in the application to the
    Rack::Utils::HTTP_STATUS_CODES hash are now supported, allowing
    users to set custom status lines in Rack and have them reflected
    in responses.  This feature causes a minor performance regression,
    but other optimizations make up for it for Ruby 2.2 users.

  * The monotonic clock is used under Ruby 2.1+, making the
    timeout feature immune to system clock changes.

  As Rainbows! may be used anonymously without registration, the
  project is committed to supporting anonymous and pseudonymous
  help requests, contributions and feedback via plain-text mail to:

      rainbows-public@bogomips.org

  The mail submission port (587) is open to those behind firewalls
  and allows access via Tor and anonymous remailers.
  Archives are accessible via:

  * http://bogomips.org/rainbows-public/
  * nntp://news.public-inbox.org/inbox.comp.lang.ruby.rainbows
  * nntp://news.gmane.org/gmane.comp.lang.ruby.rainbows.general

  and mirrored to various other places, so you do not even need
  to use a valid address when posting.

  18 changes since Rainbows! 4.7.0

        README: remove Zbatery references
        http_parser: handle keepalive_requests internally
        kill the moronic Status: header
        reflect changes in Rack::Utils::HTTP_STATUS_CODES
        reduce constant lookup dependencies
        http_parser: workaround hijack changes in unicorn 5
        http_server: add master_pid attribute
        stream_response_epoll: remove hijack_prepare call
        bump to unicorn 5.0.1, use monotonic clock
        add .gitattributes for Ruby method detection
        response: avoid garbage string entirely
        tiny bytecode reductions for cold paths
        Ruby 1.9.3+-only cleanups
        revactor: remove fcntl dependency
        response: simplify regexp
        t0105: fix test reliability
        fix Rainbows.now definition for old Rubies
        fix broken constant lookups in unmaintained bits

rainbows-5.0.0/Test_Suite0000777000004100000410000000000012641135250016522 2t/READMEustar  www-datawww-datarainbows-5.0.0/FAQ0000644000004100000410000000764312641135250013733 0ustar  www-datawww-data= Frequently Asked Questions about \Rainbows!

=== Why is \Rainbows! a separate project from unicorn?

\Rainbows! is for the odd, corner-case requests that unicorn is poorly
suited for.  More scalable concurrency models introduce additional
complexity that unicorn users and developers are uncomfortable with for
the common cases.


=== What complexity?  Threads/events/actors are easy to work with!

Good for you.  Some of us depend on libraries incompatible with those
models, or are just too lazy to deal with them for the majority of
requests we service.


=== Isn't "rainbows" a branch of unicorn?

That functionality is now in the Revactor model of \Rainbows!
However, \Revactor is not recommended since it is dormant
upstream and requires your application (and all its libraries)
to cooperate with \Revactor for concurrency.


=== What happened to the "gossamer" branch of unicorn?

It became the ThreadPool model of \Rainbows!


=== Which concurrency model should I use?

It depends on your application, libraries, Ruby stack and use cases.
That's why we support as many concurrency models as we can.  Each model
has its own strengths and weaknesses in terms of maturity,
ease-of-debugging, compatibility, performance, and memory usage.


=== Should I put \Rainbows! behind nginx to serve slow clients?

It is optional.  You can still use nginx to route certain requests to
unicorn and others to \Rainbows!  nginx will always outperform
\Rainbows! in both pure reverse proxying and static file serving,
but \Rainbows! is for hosting applications that are more easily
implemented in Ruby than in C.


=== Should I use \Rainbows! to serve static files?

It depends on the size and number of static files you're serving.  If
you're serving a lot of static files (especially large ones), then by
all means use nginx.  If not, then \Rainbows! is likely a "good enough"
solution even if nginx will always outperform it in raw throughput.


=== How do I support SSL?

If you need streaming "rack.input" to do on-the-fly upload processing
within your Rack application, then using an SSL proxy such as
{Pound}[http://www.apsis.ch/pound/] or {Stunnel}[http://stunnel.org/] is
required.  Pound has built-in X-Forwarded-For support while Stunnel
requires an extra {patch}[http://haproxy.1wt.eu/download/patches/].

If you don't need streaming "rack.input", then nginx is a great HTTPS
reverse proxy.

Refer to the {unicorn FAQ}[http://unicorn.bogomips.org/FAQ.html] on how
to ensure redirects go to "https://" URLs.
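
As a quick illustration (a sketch, not taken from the unicorn FAQ): if
the SSL proxy in front of \Rainbows! sets the X-Forwarded-Proto header,
recent Rack versions already report the correct scheme to the
application, so scheme-aware redirects keep working:

  # config.ru fragment; assumes the SSL proxy sets X-Forwarded-Proto
  run lambda { |env|
    scheme = Rack::Request.new(env).scheme # "https" behind the SSL proxy
    [ 200, { 'Content-Type' => 'text/plain' }, [ "scheme=#{scheme}\n" ] ]
  }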


=== Is there a "rainbows_rails" command like there is "unicorn_rails"?

No.

"unicorn_rails" was written primarily to support older versions of
Rails.  Since \Rainbows! is designed for newer applications based on
Rack, it can just use a "config.ru" file like other Rack frameworks and
applications.

For Rails 3.x, you should already have a config.ru file and
"rainbows(1)" will work out-of-the-box like "rackup(1)".  Rails 3
will support RACK_ENV as set by "rainbows(1)", so you won't need
to set RAILS_ENV.

For Rails 2.3.x, the following config.ru will work for you:

  ENV["RAILS_ENV"] ||= ENV["RACK_ENV"]
  require "#{::File.expand_path('config/environment')}"
  use Rails::Rack::Static
  run ActionController::Dispatcher.new

For older versions of Rails, the following config.ru will work:

  ENV["RAILS_ENV"] ||= ENV["RACK_ENV"]
  require "#{::File.expand_path('config/boot')}"
  require "#{::File.expand_path('config/environment')}"
  require 'unicorn/app/old_rails'
  require 'unicorn/app/old_rails/static' # not needed with Unicorn 0.95+
  use Unicorn::App::OldRails::Static
  run Unicorn::App::OldRails.new

One thing to watch out for is that RAILS_ENV will not be set in the
environment for you, thus we set it to match RACK_ENV.

=== I'm using threads and Rails is misbehaving!

If you use any of the threaded concurrency models, you will need to use
{config.threadsafe!}[http://m.onkey.org/thread-safety-for-your-rails]
in your config/environments/$RAILS_ENV.rb
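
For example (a minimal sketch; the exact surrounding block depends on
your Rails version, and "YourApp" is a placeholder for your application
module):

  # config/environments/production.rb (Rails 3.x style)
  YourApp::Application.configure do
    config.threadsafe!
  end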
rainbows-5.0.0/GNUmakefile0000644000004100000410000000136712641135250015450 0ustar  www-datawww-data# use GNU Make to run tests in parallel, and without depending on RubyGems
all::
# RSYNC_DEST := rubyforge.org:/var/www/gforge-projects/rainbows
RSYNC_DEST := rainbows.bogomips.org:/srv/rainbows
rfpackage := rainbows
PLACEHOLDERS := rainbows_1 Summary

man-rdoc: man html
	$(MAKE) -C Documentation comparison.html
doc:: man-rdoc
include pkg.mk

base_bins := rainbows
bins := $(addprefix bin/, $(base_bins))
man1_bins := $(addsuffix .1, $(base_bins))
man1_paths := $(addprefix man/man1/, $(man1_bins))

clean:
	-$(MAKE) -C Documentation clean

man html:
	$(MAKE) -C Documentation install-$@

pkg_extra += $(man1_paths) lib/rainbows/version.rb

lib/rainbows/version.rb: GIT-VERSION-FILE

all:: test
test: lib/rainbows/version.rb
	$(MAKE) -C t

.PHONY: man html
rainbows-5.0.0/README0000644000004100000410000001374712641135250014263 0ustar  www-datawww-data= Rainbows! - unicorn for sleepy apps and slow clients

\Rainbows! is an HTTP server for sleepy Rack applications.  It is based on
unicorn, but designed to handle applications that expect long
request/response times and/or slow clients.

If you're on GNU/Linux and overwhelmed by options in \Rainbows!,
consider {yahns}[http://yahns.yhbt.net/] as it has fewer options,
is more energy-efficient during non-peak traffic, and may also
be configured as a single worker process.

For Rack applications not heavily bound by slow external network
dependencies, consider unicorn instead as it is simpler and easier to
debug.

== \Rainbows! is about Diversity

We aim to support as many concurrency models as we can because they all
suck; differently.

For network concurrency, models we currently support are:

* {Coolio}[link:Rainbows/Coolio.html]
* {CoolioFiberSpawn}[link:Rainbows/CoolioFiberSpawn.html]
* {CoolioThreadPool}[link:Rainbows/CoolioThreadPool.html]
* {CoolioThreadSpawn}[link:Rainbows/CoolioThreadSpawn.html]
* {Epoll}[link:Rainbows/Epoll.html]
* {EventMachine}[link:Rainbows/EventMachine.html]
* {FiberPool}[link:Rainbows/FiberPool.html]
* {FiberSpawn}[link:Rainbows/FiberSpawn.html]
* {NeverBlock}[link:Rainbows/NeverBlock.html]
* {Revactor}[link:Rainbows/Revactor.html]
* {ThreadPool}[link:Rainbows/ThreadPool.html]
* {ThreadSpawn}[link:Rainbows/ThreadSpawn.html]
* {WriterThreadPool}[link:Rainbows/WriterThreadPool.html]
* {WriterThreadSpawn}[link:Rainbows/WriterThreadSpawn.html]
* {XEpoll}[link:Rainbows/XEpoll.html]
* {XEpollThreadPool}[link:Rainbows/XEpollThreadPool.html]
* {XEpollThreadSpawn}[link:Rainbows/XEpollThreadSpawn.html]

We have {many more on the way}[link:TODO.html] for handling network
concurrency.  Additionally, we also use multiple processes (managed by
Unicorn) for robustness and CPU/memory/disk concurrency.

We also provide Rainbows::AppPool Rack middleware for some network
concurrency models for limiting application concurrency independently of
network concurrency.
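
For example, a config.ru may use it like this (a sketch, not a drop-in
recipe; "MyApp" is a placeholder and the exact options are described in
the Rainbows::AppPool RDoc):

    require 'rainbows/app_pool'
    use Rainbows::AppPool, :size => 16 # cap in-app concurrency at 16
    run MyApp.new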

== Features

* Designed for {Rack}[http://rack.github.io/], the standard for
  modern Ruby HTTP applications.

* Built on {Unicorn}[http://unicorn.bogomips.org/], inheriting its
  process/socket management features such as transparent upgrades and
  Ruby configuration DSL.

* As with Unicorn, it is able to stream large request bodies off the
  socket to the application while the client is still uploading.  Since
  \Rainbows! can handle slow clients, this feature is more useful than
  it is with Unicorn.

* Combines heavyweight concurrency (worker processes) with lightweight
  concurrency (Events/Fibers/Actors/Threads), allowing CPU/memory/disk to
  be scaled independently of client connections.  More concurrency models
  (listed in the TODO) will be supported as we find time for them.

* We give you {lots of options}[link:Summary.html] with more
  {on the way}[link:TODO.html].

== Applications

\Rainbows! is mainly designed for the odd things Unicorn sucks at:

* 3rd-party APIs (to services outside your control/LAN)
* OpenID consumers (to providers outside your control/LAN)
* Reverse proxy implementations with editing/censoring
  (to upstreams outside your control/LAN)
* Comet
* BOSH (with slow clients)
* HTTP server push
* Long polling
* Reverse AJAX
* real-time upload processing (via {upr}[http://upr.bogomips.org/])

\Rainbows! can also be used to service slow clients directly even with
fast applications.

== License

\Rainbows! is copyright 2009,2010 by all contributors (see logs in git).
\Rainbows! is licensed under the Ruby (1.8) license or the GPLv2 or later.
See the included {LICENSE}[link:LICENSE.html] file for more details.

\Rainbows! is 100% Free Software.

== Install

You may install it via RubyGems on RubyGems.org:

  gem install rainbows

== Usage

=== for Rack applications

In APP_ROOT (where config.ru is located), run:

  rainbows

\Rainbows! will bind to all interfaces on TCP port 8080 by default.
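
The rackup-compatible switches may be used to override that, for
example (the addresses below are only examples):

  rainbows -E production -l 127.0.0.1:9292 -l /tmp/rainbows.sock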

=== Configuration File(s)

\Rainbows! will look for the config.ru file used by rackup in APP_ROOT.

For deployments, it can use a config file for Unicorn and
\Rainbows!-specific options specified by the +--config-file/-c+
command-line switch.  \Rainbows! accepts all options found in
{Unicorn::Configurator}[http://unicorn.bogomips.org/Unicorn/Configurator.html]
as well as the "\Rainbows!" block, so you can have the following in your
config file:

    worker_processes 4 # assuming four CPU cores
    Rainbows! do
      use :FiberSpawn
      worker_connections 100
    end

See the {Rainbows! configuration}[link:Rainbows/Configurator.html]
{documentation}[link:Rainbows/Configurator.html]
for more details.
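
With a config file in place, a deployment is typically started like
this (the paths here are only examples):

  rainbows -c /etc/rainbows/app.conf.rb -E production -D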

== Development

You can get the latest source via git from the following locations
(these versions may not be stable):

  git://bogomips.org/rainbows.git
  git://repo.or.cz/rainbows.git (mirror)

You may browse the code from the web and download the latest snapshot
tarballs here:

* http://bogomips.org/rainbows.git (cgit)
* http://repo.or.cz/w/rainbows.git (gitweb)

Inline patches (from "git format-patch") to the mailing list are
preferred because they allow code review and comments in the reply to
the patch.

We will adhere to mostly the same conventions for patch submissions as
git itself.  See the Documentation/SubmittingPatches document
distributed with git for patch submission guidelines to follow.  Just
don't email the git mailing list or maintainer with \Rainbows! patches.

== Disclaimer

There is NO WARRANTY whatsoever if anything goes wrong, but let us know
and we'll try our best to fix it.

== Contact

All feedback (bug reports, user/development discussion, patches, pull
requests) go to the mailing list.  Patches must be sent inline
(git format-patch -M + git send-email).  No subscription is necessary
to post on the mailing list.  No top posting.

* email: mailto:rainbows-public@bogomips.org
* subscribe: mailto:rainbows-public+subscribe@bogomips.org
* archives: http://bogomips.org/rainbows-public/
  nntp://news.public-inbox.org/inbox.comp.lang.ruby.rainbows
  nntp://news.gmane.org/gmane.comp.lang.ruby.rainbows.general
rainbows-5.0.0/rainbows_10000644000004100000410000000002312641135250015351 0ustar  www-datawww-dataolddoc_placeholder
rainbows-5.0.0/NEWS0000644000004100000410000012570012641135250014073 0ustar  www-datawww-data=== Rainbows! 5.0.0 - maintained as long as anybody uses it! / 2015-11-25 00:58 UTC

  This release syncs with unicorn 5 and drops some old compatibility
  cruft from old releases.  Performance should be roughly unchanged
  for Ruby 2.2 users while older Rubies (1.9.3 - 2.1) will see
  minor, probably unnoticeable performance regressions.

  Compatibility:

  * The horrible, proprietary (:P) "Status:" response header is
    finally gone, saving at least 16 precious bytes in every HTTP
    response.  This should make it easier to write custom HTTP clients
    which are compatible across all HTTP servers.  It will hopefully
    make migrating between different Rack servers easier for new
    projects.

  * Ruby 1.8 support removed.  Ruby 1.9.3 is currently the earliest
    supported version.  However, expect minor, likely-unnoticeable
    performance regressions if you use Ruby 2.1 or earlier.  Going
    forward, Rainbows! will favor the latest version (currently 2.2) of
    the mainline Ruby implementation, potentially sacrificing
    performance on older Rubies.

  New features:

  * sd_listen_fds(3) emulation added for systemd compatibility.
    You may now stop using PID files and other process monitoring
    software when using systemd.

  * Newly-set TCP socket options are now applied to inherited sockets.

  * Dynamic changes in the application to the
    Rack::Utils::HTTP_STATUS_CODES hash are now supported, allowing
    users to set custom status lines in Rack to be reflected in
    unicorn.  This feature causes a minor performance regression,
    but it is offset for Ruby 2.2 users by other optimizations.

  * The monotonic clock is used under Ruby 2.1+, making the
    timeout feature immune to system clock changes.

  As Rainbows! may be used anonymously without registration, the
  project is committed to supporting anonymous and pseudonymous
  help requests, contributions and feedback via plain-text mail to:

      rainbows-public@bogomips.org

  The mail submission port (587) is open to those behind firewalls
  and allows access via Tor and anonymous remailers.
  Archives are accessible via:

  * http://bogomips.org/rainbows-public/
  * nntp://news.public-inbox.org/inbox.comp.lang.ruby.rainbows
  * nntp://news.gmane.org/gmane.comp.lang.ruby.rainbows.general

  and mirrored to various other places, so you do not even need
  to use a valid address when posting.

  18 changes since Rainbows! 4.7.0

        README: remove Zbatery references
        http_parser: handle keepalive_requests internally
        kill the moronic Status: header
        reflect changes in Rack::Utils::HTTP_STATUS_CODES
        reduce constant lookup dependencies
        http_parser: workaround hijack changes in unicorn 5
        http_server: add master_pid attribute
        stream_response_epoll: remove hijack_prepare call
        bump to unicorn 5.0.1, use monotonic clock
        add .gitattributes for Ruby method detection
        response: avoid garbage string entirely
        tiny bytecode reductions for cold paths
        Ruby 1.9.3+-only cleanups
        revactor: remove fcntl dependency
        response: simplify regexp
        t0105: fix test reliability
        fix Rainbows.now definition for old Rubies
        fix broken constant lookups in unmaintained bits

=== Rainbows! 4.7.0 - updates for ruby 2.3.0dev / 2015-10-19 21:24 UTC

  This release includes fixes for upcoming changes in Ruby 2.3.0
  (due December 2015).  Use of Rainbows! for new projects is not
  recommended, try other servers instead.

  * update dependencies for Ruby 2.2.0dev
  * switch docs + website to olddoc
  * gemspec: fix bad reference to rdoc_options
  * README: reference yahns
  * build: fix quoting issue with double parens
  * response: avoid unnecessary args to IO.copy_stream
  * t/close-has-env.ru: ensure close is idempotent
  * sync_close: This fix breakage from Ruby-trunk r50118
  * t/t0044-autopush.sh: remove test
  * t/test_isolate.rb: updates for various gem versions
  * response: convert source arg to path before IO.copy_stream
  * speed up QUIT for users of the unicorn worker_loop
  * gemspec: use SPDX-compatible license for GPL-2.0+

=== Rainbows! 4.6.2 - see you on the other side / 2014-05-12 07:32 UTC

  This release updates documentation to reflect the migration of the
  mailing list to a new public-inbox[1] instance.  This is necessary
  due to the impending RubyForge shutdown on May 15, 2014.

  The public-inbox address is: rainbows-public@bogomips.org
  (no subscription required, plain text only)
  ssoma[2] git archives: git://bogomips.org/rainbows-public
  browser-friendly archives: http://bogomips.org/rainbows-public/

  As evidenced by our git history, Rainbows! development has stagnated
  over the years.  Rainbows! was designed to be an unopinionated
  exploration into various concurrency options offered in the Ruby
  ecosystem.

  In recent years, I have come to favor the one-shot-based,
  worst-of-all-worlds design of yahns: http://yahns.yhbt.net/README
  Without the exploration from Rainbows!, yahns may not exist today.

  Disclaimer: Rainbows! has always been intolerant of buggy/broken code in
  libraries and apps.  yahns is even less tolerant of buggy/broken code,
  as the SIGKILL-based timeout mechanism inherited from unicorn is completely
  gone. On the other hand, yahns has reasonable defaults so you do not
  have to read documentation to configure it.

  [1] policy: http://public-inbox.org/ - git://80x24.org/public-inbox
      an "archives first" approach to mailing lists
  [2] mechanism: http://ssoma.public-inbox.org/ - git://80x24.org/ssoma
      some sort of mail archiver (using git)

=== rainbows 4.6.1 - EventMachine fixes / 2014-02-02 00:30 UTC

  * event_machine: update for unicorn 4.8.x
  * disable cramp tests for now
  * update EventMachine tests
  * set executable bit rainbows executable

  Nothing relevant for non-EM users.

=== Rainbows! 4.6.0 - fix unicorn 4.8.0 compatibility / 2014-01-17 20:25 UTC

  The unicorn 4.8.0 internal changes unfortunately broke some
  unoffically supported behavior we depended on.  This release fixes
  that, but as a result, we lose compatibility of older unicorn
  versions.  (Oops!, oh well... :x)

  There's also minor bugfixes and documentation updates.
  The website is now at http://rainbows.bogomips.org/ since
  RubyForge is shutting down.  The mailing list will be migrated
  soon.

  In order to ease transitions to future versions of the GPL, we are
  now "GPLv2 or later" instead of explicitly GPLv2 + GPLv3(-only).
  The old Ruby 1.8 license remains an option.  If the FSF turns out
  a horrible GPLv4, users are free to continue using GPLv2 or GPLv3.

=== Rainbows! 4.5.0 - hijacking support / 2013-02-27 10:28 UTC

  This release adds hijacking support for Rack 1.5.x users.
  See Rack documentation for more information about hijacking.
  Lin Jen-Shin also provided the -N/--no-default-middleware option.
  Minor packaging cleanups and new HACKING document.

  There are also some corner-case bugfixes for *Epoll* users
  (sleepy_penguin, these bugs do not affect EM or Cool.io users)
  and test suite portability improvements.

=== Rainbows! 4.4.3 - bugfixes for EventMachine users / 2013-01-18 11:23 UTC

  This release fixes two EventMachine bugfixes from Lin Jen-Shin
  and Mark J. Titorenko.  There are also some minor cleanups.

  Lin Jen-Shin (1):
        event_machine: avoid close on deferred response

  Mark J. Titorenko (1):
        event_machine: join reactor_thread if it is already running

  Eric Wong (2):
        event_machine: cleanup confusing assignment
        t/GNUmakefile: cleanup test dependencies

=== Rainbows! 4.4.2 - EventMachine async.callback fix / 2012-12-06 11:41 UTC

  One bugfix allows stream(:keep_open) in Sinatra to work
  properly.

  Thanks to W. Andrew Loe III for the informative bug report
  and reproducible test case.

  ref: http://mid.gmane.org/CA+-9oNd1EFqsniPkkPTwu5opTCinbM7-2KHoXov7+y3LE4s4Tg@mail.gmail.com

=== Rainbows! 4.4.1 - a minor bugfix for Fiber users / 2012-08-31 01:54 UTC

  Fiber-based concurrency options avoids negative sleep
  intervals.  Thanks to Lin Jen-Shin for pointing this out.

=== Rainbows! 4.4.0 - minor improvements / 2012-08-18 07:32 UTC

  For epoll/Cool.io-based concurrency models, shutdown() is now
  used to timeout keepalive clients to avoid race conditions.
  Minor documentation improvements.

=== Rainbows! 4.3.1 - small bugfix / 2011-09-02 02:18 UTC

  This release fixes a potential reentrancy deadlock when
  using the default logger from the Ruby standard library.

=== Rainbows! 4.3.0 - pull in changes from unicorn 4.1.0 / 2011-08-20 01:20 UTC

  The deprecated Rainbows::HttpResponse class is finally gone
  thanks to Pratik Naik.  Logging of errors is more consistent
  with the changes in unicorn 4.1.0.  There are also minor
  documentation updates.  See the unicorn 4.1.0 release notes
  for more details:
    http://bogomips.org/unicorn.git/tag/?id=v4.1.0

=== Rainbows! 4.2.0 - Cramp WebSocket updates! / 2011-08-05 23:35 UTC

  This release includes updates to support WebSockets
  under Cramp 0.14 and later.  This will be the last
  release which supports Cramp 0.13.

  There are no changes in this release for non-Cramp
  users.

=== Rainbows! 4.1.0 - minor internal cleanups / 2011-07-30 22:43 UTC

  There are only some minor cleanups in this release and a bump to
  kgio 2.5 to remove the dependency on io/wait.  kgio 2.5 or later
  is now required (kgio 2.6+ will be required in the next
  release).

=== Rainbows! 4.0.0 - MOAR concurrency for MOAR COARS / 2011-06-27 09:33 UTC

  Rainbows! now scales to more than 1024 worker processes without
  special privileges.  To enable this, Rainbows! now depends on
  Unicorn 4.x and thus raindrops[1].

  client_max_header_size directive is added to limit per-client
  memory usage in headers.

  An experimental StreamResponseEpoll concurrency option now
  exists to buffer outgoing responses without any thread-safe
  dependencies.  Unlike the rest of Rainbows! which works fine
  without nginx, this concurrency option is /only/ supported
  behind nginx, even more strongly so than Unicorn itself.
  non-nginx LAN clients are NOT supported for this.  This relies
  on the sleepy_penguin[2] RubyGem (and Linux).

  There are some minor bug fixes and cleanups all around.  See
  "git log v3.4.0.." for details.

  [1] http://raindrops.bogomips.org/
  [2] http://bogomips.org/sleepy_penguin/

=== Rainbows 3.4.0 - minor updates and fixes / 2011-05-21 03:19 UTC

  SIGQUIT (graceful shutdown) now drops idle keepalive clients for
  the concurrency models where maintaining an idle client is
  relatively inexpensive: Coolio, CoolioThreadPool,
  CoolioThreadSpawn, Epoll, EventMachine, XEpoll,
  XEpollThreadPool, XEpollThreadSpawn.

  Kgio.autopush now works properly for all multi-threaded
  concurrency models (if you're using :tcp_nopush).

=== Rainbows! 3.3.0 - doc improvements and more / 2011-05-16 21:15 UTC

  * improved documentation all around, suggestions/comments to further
    improve documentation is greatly welcome at: rainbows-talk@rubyforge.org

  * added GPLv3 option to the license (now (Ruby|GPLv2|GPLv3), though
    Unicorn is still (Ruby|GPLv2) for now)

  * added client_header_buffer_size config directive (default 1K)

  * small default header buffer size (16K => 1K) to reduce memory usage,
    Rails apps with cookie sessions may want to increase this (~2K)

  * all concurrency models default to 50 connections per process

  * all concurrency models with a secondary :pool_size parameter also
    default to 50 (threads/fibers/whatever)

  * RLIMIT_NOFILE and RLIMIT_NPROC are automatically increased if needed

  * Rainbows::ThreadTimeout middleware rewritten, still not recommended,
    lazy people should be using Unicorn anyways :)

  * Several experimental Linux-only edge-triggered epoll options:
    XEpollThreadSpawn, XEpollThreadPool, XEpoll, and Epoll.
    The latter two were in previous releases but never announced.
    These require the "sleepy_penguin", "raindrops", and "sendfile" RubyGems

  === Deprecations

  * Rainbows::Fiber::IO* APIs all deprecated, Rainbows! will avoid
    having any concurrency model-specific APIs in the future and
    also avoid introducing new APIs for applications.

  * Fiber-based concurrency models are no longer recommended, they're
    too fragile for most apps, use at your own risk (they'll continue to
    be supported, however).  Linux NPTL + Ruby 1.9 is pretty lightweight
    and will be even lighter in Ruby 1.9.3 if you're careful with stack
    usage in your C extensions.

=== Rainbows! 3.2.0 - trying to send files to slow clients / 2011-03-15 12:45 UTC

  We now use IO#trysendfile in the sendfile 1.1.0 to reduce the
  cost of generating backtraces for slow clients (from EAGAIN).
  Nothing new for people not serving static files (but more
  on the way).

  Existing "sendfile" gem users must upgrade to 1.1.0
  or risk being left without sendfile support at all:
   http://bogomips.org/rainbows.git/patch?id=cd8a874d

=== Rainbows! 3.1.0 - minor updates / 2011-02-11 11:13 UTC

  Small bug fixes that have been sitting around, not much but
  it's already been one month since our last release.

  * Unicorn dependency updated to 3.4.0, so we get IPv6 support
    and Kgio.autopush support for ":tcp_nopush => true" users.

  * Optional :pool_size argument is fixed for NeverBlock and
    CoolioThreadPool users.

  * Mostly minor internal code cleanups

  * Sunshowers support removed, it was out-of-date and
    unmaintained.  Cramp remains supported for now.

  * X-Rainbows-* response headers support removed, nobody used it.

  There are several new features in this release not documented
  here.  Consider any new features not mentioned in these release
  notes to be subject to removal/renaming in future releases.

=== Rainbows! 3.0.0 - serving the fastest apps to slow clients faster! / 2011-01-12 01:12 UTC

  There is one incompatible change: We no longer assume application
  authors are crazy and use strangely-cased headers for "Content-Length",
  "Transfer-Encoding", and "Range".  This allows us to avoid the
  case-insensitivity of Rack::Utils::HeaderHash for a speed boost on the
  few apps that already serve thousands of requests/second per-worker.

  :Coolio got "async.callback" support like :EventMachine, but it
  currently lacks EM::Deferrables which would allow us to call
  "succeed"/"fail" callbacks.  This means only one-shot response writes
  are supported.

  There are numerous internal code cleanups and several bugfixes for
  handling partial static file responses.

=== Rainbows! 2.1.0 - Cool.io, bugfixes and more! / 2010-12-29 02:18 UTC

  Cool.io (new version of Rev) support is explicitly added
  (it always worked before).  ":Coolio" may be used in place
  of ":Rev" anywhere in your Rainbows! config file.

  There is a new "keepalive_requests" config directive to limit
  the number of requests a single connection may make (default:
  100, same as nginx).  This may be useful for better
  load-balancing characteristics.

  The old "Rev" prefixes remain supported as long as Cool.io
  remains compatible with Rev (likely forever).

  Bug fixes:

  * Rainbows::ThreadTimeout middleware with multiple clients
  * large, pipelined upload errors with Revactor+Coolio(Rev)
  * high CPU usage for maintaining idle keepalive on *Fiber*
  * needless ThreadPool wakeups
  * request env prematurely cleared keepalive requests,
    breaking some middlewares such as Clogger.
  * "close" not called on body if wrapper and sendfile used together

  Various code cleanups, and our RDoc website is JavaScript-free.
  See the ChangeLog or git for all changes.

=== Rainbows! 2.0.1 - upload pipelining fixes / 2010-12-03 01:26 UTC

  For HTTP clients living on the edge and pipelining uploads, we
  now fully support pipelined requests (as long as the application
  consumes each request in its entirety).

=== Rainbows! 2.0.0 - minority rules! / 2010-11-20 03:10 UTC

  This release is targeted at the minority of web applications
  that deal heavily with uploads.

  Thanks to Unicorn 3.x, we now support HTTP keepalive for
  requests with bodies as long as the application consumes them.
  Unicorn 3.x also allows disabling the rewindability requirement
  of "rack.input" (in violation of the Rack 1.x spec).

  The global client_max_body_size may also be applied per-endpoint
  using the Rainbows::MaxBody middleware described in:

    http://rainbows.rubyforge.org/Rainbows/MaxBody.html

=== Rainbows! 1.0.0 - internal cleanups / 2010-10-28 09:01 UTC

  This release is merely a milestone in our evolving internal API.
  Use of kgio may result in performance improvements under Ruby
  1.9.2 with non-blocking I/O-intensive workloads.

  The only bugfix is that SIGHUP reloads restores defaults on
  unset settings.  A similar fix is included in Unicorn 2.0.0
  as well.

=== Rainbows! 1.0.0pre1 - kinder, gentler I/O / 2010-10-26 21:33 UTC

  Mostly internal changes for kgio (and Unicorn) integration.
  There should be no (supported) user-visible changes from
  Rainbows! 0.97.0.  kgio should improve performance for
  concurrency models that use non-blocking I/O internally,
  especially under Ruby 1.9.2

=== Rainbows! 0.97.0 / 2010-08-28 19:46 UTC

  We now depend on Unicorn 1.1.3 to avoid race conditions during
  log cycling.  This bug mainly affected folks using Rainbows! as
  a multithreaded static file server.

  "keepalive_timeout 0" now works as documented for all backends
  to completely disable keepalive.  This was previously broken
  under EventMachine, Rev, and Revactor.

  There is a new Rainbows::ThreadTimeout Rack middleware which
  gives soft timeouts to apps running on multithreaded backends.

  There are several bugfixes for proxying IO objects and the usual
  round of small code cleanups and documentation updates.

  See the commits in git for all the details.

=== Rainbows! 0.96.0 - range support / 2010-08-03 09:04 UTC

  For concurrency models that use sendfile or IO.copy_stream, HTTP
  Range requests are honored when serving static files.  Due to
  the lack of known use cases, multipart range responses are not
  supported.

  When serving static files with sendfile and proxying
  pipe/socket bodies, responses bodies are always properly closed
  and we have more test cases for dealing with prematurely
  disconnecting clients.

  Concurrency model specific changes:

  EventMachine, NeverBlock -
  * keepalive is now supported when proxying pipes/sockets
  * pipelining works properly when using EM::FileStreamer
  * these remain the only concurrency models _without_
    Range support (EM::FileStreamer doesn't support ranges)

  Rev, RevThreadSpawn, RevThreadPool -
  * keepalive is now supported when proxying pipes/sockets
  * pipelining works properly when using sendfile

  RevThreadPool -
  * no longer supported under 1.8, it pegs the CPU at 100%.
    Use RevThreadSpawn (or any other concurrency model) if
    you're on 1.8, or better yet, switch to 1.9.

  Revactor -
  * proxying pipes/sockets with DevFdResponse is much faster
    thanks to a new Actor-aware IO wrapper (used transparently
    with DevFdResponse)
  * sendfile support added, along with Range responses

  FiberSpawn, FiberPool, RevFiberSpawn -
  * Range responses supported when using sendfile

  ThreadPool, ThreadSpawn, WriterThreadPool, WriterThreadSpawn -
  * Range responses supported when using sendfile or
    IO.copy_stream.

  See the full git logs for a list of all changes.

=== Rainbows! v0.95.1 - depend on newer Unicorn / 2010-07-11 02:53 UTC

  Eric Wong (3):
    test_isolate: document why we test with Rack 1.1.0
    doc: make RDoc skip private methods
    bump Unicorn dependency to 1.1.1

=== Rainbows! 0.95.0 - sendfile() support! / 2010-07-10 08:45 UTC

  In addition to the 1.9-only IO.copy_stream, the new sendfile
  1.0.0 gem may optionally be used with most concurrency models
  (even under 1.8).

  See http://rainbows.rubyforge.org/Static_Files.html for more info

  Other changes:

  * 1.9 encoding bugfix for (Rev)FiberSpawn and FiberPool
  * fixed potential rack.input corruption with Revactor
  * ThreadPool graceful shutdown no longer blocks until timeout
  * optional ServerToken middleware to display the Server: header
  * Dependencies bumped to Rack 1.1+ and Unicorn 1.1.0+
  * numerous internal cleanups, small bugfixes and speedups
  * more concise website oriented at users

=== Rainbows! 0.94.0 - one eight ate my homework! / 2010-06-04 08:42 UTC

  This release fixes corrupted large response bodies for Ruby 1.8
  users with the WriterThreadSpawn and WriterThreadPool models
  introduced in 0.93.0.  This bug did not affect Ruby 1.9 users
  nor the users of any older concurrency models.

  There is also a strange new Rainbows::Sendfile middleware.  It
  is used to negate the effect of Rack::Contrib::Sendfile, if that
  makes sense.  See the RDoc or
  http://rainbows.rubyforge.org/Rainbows/Sendfile.html for all the
  gory details.

  Finally, the RDoc for our test suite is on the website:

    http://rainbows.rubyforge.org/Test_Suite.html

  I wrote this document back when the project started but
  completely forgot to tell RDoc about it.  Personally, this
  test suite is one of my favorite parts of the project.

=== Rainbows! 0.93.0 - MOAR!!!1 / 2010-05-29 06:20 UTC

  In our race to have more concurrency options than real sites
  using this server, we've added two new and fully supported
  concurrency models: WriterThreadSpawn and WriterThreadPool

  They're both designed for serving large static files and work
  best with IO.copy_stream (sendfile!) under Ruby 1.9.  They may
  also be used to dynamically generate long running, streaming
  responses after headers are sent (use "proxy_buffering off" with
  nginx).

  Unlike most concurrency options in Rainbows!, these are designed
  to run behind nginx (or haproxy if you don't support POST/PUT
  requests) and are vulnerable to slow client denial of service
  attacks.

  I floated the idea of doing something along these lines back in
  the early days of Unicorn, but deemed it too dangerous for some
  applications.  But nothing is too dangerous for Rainbows!  So
  here they are now for your experimentation.

=== Rainbows! 0.92.0 - inching towards the pot of gold / 2010-05-04 21:58 UTC

  Mostly internal cleanups and small improvements.

  The only backwards incompatible change was the addition of the
  "client_max_body_size" parameter to limit upload sizes to
  prevent DoS.  This defaults to one megabyte (same as nginx), so
  any apps relying on the limit-less behavior of previous will
  have to configure this in the Unicorn/Rainbows! config file:

        Rainbows! do
  	# nil for unlimited, or any number in bytes
  	client_max_body_size nil
        end

  The ThreadSpawn and ThreadPool models are now optimized for serving
  large static files under Ruby 1.9 using IO.copy_stream[1].

  The EventMachine model has always had optimized static file
  serving (using EM::Connection#stream_file_data[2]).

  The EventMachine model (finally) gets conditionally deferred app
  dispatch in a separate thread, as described by Ezra Zygmuntowicz
  for Merb, Ebb and Thin[3].

  [1] - http://euruko2008.csrug.cz/system/assets/documents/0000/0007/tanaka-IOcopy_stream-euruko2008.pdf
  [2] - http://eventmachine.rubyforge.org/EventMachine/Connection.html#M000312
  [3] - http://brainspl.at/articles/2008/04/18/deferred-requests-with-merb-ebb-and-thin

=== Rainbows! 0.91.1 - use a less-broken parser from Unicorn / 2010-04-19 21:13 UTC

  This release fixes a denial-of-service vector for deployments
  exposed directly to untrusted clients.

  The HTTP parser in Unicorn <= 0.97.0 would trip an assertion
  (killing the associated worker process) on invalid
  Content-Length headers instead of raising an exception.  Since
  Rainbows! and Zbatery supports multiple clients per worker
  process, all clients connected to the worker process that hit
  the assertion would be aborted.

  Deployments behind nginx are _not_ affected by this bug, as
  nginx will reject clients that send invalid Content-Length
  headers.

  The status of deployments behind other HTTP-aware proxies is
  unknown.  Deployments behind a non-HTTP-aware proxy (or no proxy
  at all) are certainly affected by this DoS.

  Users are strongly encouraged to upgrade as soon as possible,
  there are no other changes besides this bug fix from Rainbows!
  0.91.0 nor Unicorn 0.97.0

  This bug affects all previously released versions of Rainbows!
  and Zbatery.

=== Rainbows! 0.91.0 - Unicorn resync / 2010-03-01 10:03 UTC

  Unicorn 0.97.0 has a bunch of internal cleanups and small fixes
  and this is mainly to resync with those changes.

  keepalive_timeout now defaults to 5 seconds (from 2 seconds
  previous).  This should help out clients on slower connections.

  Some small fixes and cleanups:

  * Rainbows::Fiber::IO objects may leak if a rare app uses them
    explicitly with FiberSpawn/FiberPool-only (not RevFiberSpawn)

  * quiet down ENOTCONN handling, there's nothing we can do about
    this error so we won't fill our logs with it.

=== Rainbows! 0.90.2 / 2010-02-13 09:11 UTC

  This release depends on Unicorn 0.96.1 for an updated
  Unicorn::HttpParser to avoid leaking memory.

  The HttpParser in Unicorn <= 0.96.0 did not setup the parser
  object properly to be freed by the garbage collector.

  While this bug did not affect Unicorn itself, Rainbows!
  allocates a new Unicorn::HttpParser object for every new client
  connection and Unicorn did not properly setup the parser object
  to be freed by the Ruby garbage collector.

  There are also minor cosmetic cleanups and fixes:

  Eric Wong (10):
        http_response: disallow blank, multi-value headers
        Fix "rainbows -h" and "rainbows -v"
        Update docs + tests to reflect Rev 0.3.2 release
        local.mk.sample: bump Rack dependency
        Merge branch 'rack-1.1'
        add Cramp integration tests
        Rakefile: autoload Gem
        t/bin/*: encoding should be the first line after shebang
        gemspec: bump dependency on Unicorn to avoid leak
        Rainbows! 0.90.2

=== Rainbows! 0.90.1 / 2009-12-30 10:24 UTC

  This release contains minor bugfixes/compatibility improvements
  for ThreadSpawn, ThreadPool and EventMachine users.

  Excessive error messages from spurious wakeups using
  ThreadSpawn/ThreadPool under most platforms are silenced.  Only
  Ruby 1.9 users under Linux were unaffected by this bug.

  EventMachine users may now use EM::Deferrable objects in
  responses, vastly improving compatibility with existing
  async_sinatra apps.

=== Rainbows! 0.90.0 / 2009-12-22 21:54 UTC

  This release should fix ThreadSpawn green thread blocking issues
  under MRI 1.8.  Excessive socket closing is avoided when using
  Thread* models with Sunshowers (or clients disconnecting
  during uploads).

  There is a new RevFiberSpawn concurrency model which combines
  Rev with the traditional FiberSpawn model.

=== Rainbows! 0.9.0 / 2009-12-13 22:51 UTC

  This release introduces compatibility with Sunshowers, a library
  for Web Sockets, see http://rainbows.rubyforge.org/sunshowers
  for more information.  Several small cleanups and fixes.

  Eric Wong (20):
        add RevThreadPool to README
        rev: do not initialize a Rev::Loop in master process
        rainbows.1: update headers
        do not log IOError raised during app processing
        move "async.callback" constant to EvCore
        larger thread  pool default sizes ({Rev,}ThreadPool)
        ev_core: no need to explicitly close TmpIOs
        EventMachine: allow usage as a base class
        NeverBlock: resync with recent our EM-related expansion
        RevThread*: move warning message to a saner place
        EventMachineDefer: preliminary (and) broken version
        TODO: add EM Deferrables
        RevThread*: remove needless nil assignment
        README: HTML5 Web Sockets may not be supported, yet...
        env["hack.io"] for Fiber*, Revactor, Thread* models
        EventMachineDefer is experimental
        README: add Sunshowers reference
        Rakefile: resync with Unicorn
        doc/comparison: add Web Sockets to comparison
        README updates

=== Rainbows! 0.8.0 / 2009-12-02 08:55 UTC

  This release fixes a memory leak in our existing Revactor
  concurrency model.  A new RevThreadPool concurrency model has
  been added as well as small cleaups to exit handling in workers.

=== Rainbows! 0.7.0 / 2009-11-30 04:21 UTC

  keepalive_timeout (default: 2 seconds) is now supported to
  disconnect idle connections.  Several new concurrency models
  added include: NeverBlock, FiberSpawn and FiberPool; all of
  which have only been lightly tested.  RevThreadSpawn loses
  streaming input support to become simpler and faster for the
  general cases.  AppPool middleware is now compatible with all
  Fiber-based models including Revactor and NeverBlock.

  A new document gives a summary of all the options we give you:

    http://rainbows.rubyforge.org/Summary.html

  If you're using any of the Rev-based concurrency models, the
  latest iobuffer (0.1.3) gem will improve performance.  Also,
  RevThreadSpawn should become usable under MRI 1.8 with the next
  release of Rev (0.3.2).

=== Rainbows! 0.6.0 - bugfixes galore / 2009-11-15 23:29 UTC

  Client shutdowns/errors when streaming "rack.input" into the
  Rack application are quieter now.  Rev and EventMachine workers
  now shutdown correctly when the master dies.  Worker processes
  now fail gracefully if log reopening fails.  ThreadSpawn and
  ThreadPool models now load Unicorn classes in a thread-safe way.

  There's also an experimental RevThreadSpawn concurrency
  model which may be heavily reworked in the future...

  Eric Wong (30):
        Threaded models have trouble with late loading under 1.9
        cleanup worker heartbeat and master deathwatch
        tests: allow use of alternative sha1 implementations
        rev/event_machine: simplify keepalive checking a bit
        tests: sha1.ru now handles empty bodies
        rev: split out further into separate files for reuse
        rev: DeferredResponse is independent of parser state
        remove unnecessary class variable
        ev_core: cleanup handling of APP constant
        rev: DeferredResponse: always attach to main loop
        initial cut of the RevThreadSpawn model
        rev_thread_spawn/revactor: fix TeeInput for short reads
        rev_thread_spawn: make 1.9 TeeInput performance tolerable
        tests: add executable permissions to t0102
        tests: extra check to avoid race in reopen logs test
        rev_thread_spawn: 16K chunked reads work better
        tests: ensure proper accounting of worker_connections
        tests: heartbeat-timeout: simplify and avoid possible race
        tests: ensure we process "START" from FIFO when starting
        http_response: don't "rescue nil" for body.close
        cleanup error handling pieces
        tests: more stringent tests for error handling
        revactor/tee_input: unnecessary error handling
        gracefully exit workers if reopening logs fails
        revactor/tee_input: raise ClientDisconnect on EOFError
        bump versions since we depend on Unicorn::ClientShutdown
        revactor/tee_input: share error handling with superclass
        RevThreadSpawn is still experimental
        Revert "Threaded models have trouble with late loading under 1.9"
        Rakefile: add raa_update task

=== Rainbows! 0.5.0 / 2009-11-05 10:27 UTC

  We depend on the just-released Unicorn 0.94.0 for the fixed
  trailer handling.  As with `unicorn', the `rainbows' executable
  now sets and respects ENV["RACK_ENV"].  Also small fixes and
  cleanups including better FreeBSD 7.2 compatibility and
  less likely to over-aggressively kill slow/idle workers
  when a very low timeout is set.

  Eric Wong (20):
        rev: split out heartbeat class
        bump Unicorn dependency to (consistently) pass tests
        tests: avoid single backquote in echo
        event_machine: avoid slurping when proxying
        tests: make timeout tests reliable under 1.9
        thread_pool: comment for potential SMP issue under 1.9
        Allow 'use "model"' as a string as well as symbol
        Rev model is the only user of deferred_bodies
        ev_core: use Tempfile instead of Unicorn::Util::tmpio
        ev_core: ensure quit is triggered on all errors
        rainbows: set and use process-wide ENV["RACK_ENV"]
        http_server: add one second to any requested timeout
        thread_pool: update fchmod heartbeat every second
        t0004: tighten up timeout test
        ev_core: remove Tempfile usage once again
        cleanup: remove unused t????.ru test files
        tests: staggered trailer upload test
        ensure RACK_ENV is inherited from the parent env
        t0100: more precise `expr` usage

=== Rainbows! 0.4.0 / 2009-10-27 08:44 UTC

  Basic single-threaded EventMachine support is now included.  It
  supports async_synatra[1] via the "async.callback" Rack
  environment[2].  For EventMachine, we rely on the updated
  attach/watch API in EventMachine 0.12.10.

  As Revactor 0.1.5 is now available, our Revactor support now
  depends on it as it adds the ability to listen on UNIX domain
  sockets.

  Of course, all dependencies (besides Unicorn and Rack) are
  soft and only loaded if your configured concurrency model
  requires it.

  For developers/QA folks, the integration tests are completely
  revamped for easier maintenance when new concurrency models are
  introduced and should also produce TAP-compliant output.  The
  test suite remains highly parallelizable using GNU make.

  There are immediate plans to expand support for both Rev and
  EventMachine to support use with threaded application dispatch.

  Eric Wong (41):
        rev: remove Revactor-specific workaround
        README: change ordering of concurrency model listing
        tests: more correct HTTP/0.9 test
        test-lib: avoid stalling due to bad FIFO handling
        rev: fix static file responses under HTTP/0.9
        add news bodies to site NEWS.atom.xml
        tests: avoid needlessly remaking "rainbows"
        initial EventMachine support
        tests: hopefully fix stalls in input trailer tests
        tests: avoid race condition in reopen logs test
        tests: prefer "RUBY" to lowercased "ruby"
        tests: common setup and wait_start functions
        tests: add a TAP producer shell library
        tests: port all existing tests to TAP library
        tests: remove symlinks and small files, use Make
        t9000: bail if run with an unsupported/pointless model
        tests: allow "make $model" to run tests for that model
        rev: spell ECONNABORTED correctly
        rev/evma: move common code for event models into ev_core
        ev_core: do not drop deferred bodies on graceful quits
        eventmachine: get basic tests working
        rev: do not File.expand_path on result of body.to_path
        eventmachine 0.12.8 passes all tests
        tests: make large file memory tests more reliable
        eventmachine: require EM 0.12.10
        update gem dependencies in comments/local.mk.sample
        rev: enforce Rev::VERSION >= 0.3.0
        eventmachine: add async_sinatra support
        tests: only load Revactor tests under 1.9.1
        tests: gracefully exit if EventMachine is not available
        tests: error out if socat + curl aren't reachable
        thread*: fix MRI 1.8.6 compatibility
        local.mk.sample: cleanups and minor reorg
        eventmachine: remove unnecessary ivar assignment
        eventmachine: document our support of "async_synatra"
        doc: Update TODO and README
        tests: generate all dependencies atomically
        app_pool: update RDoc
        test-lib: DWIM handling of temp UNIX sockets
        revactor: require 0.1.5, remove 0.1.4 workarounds
        gemspec: bump up Unicorn dep version to 0.93.4

  [1] http://github.com/raggi/async_sinatra
  [2] this is not 100% Rack::Lint compatible, but we'll let it
      slide since there are already folks depending on
      the async_sinatra gem

=== Rainbows! 0.3.0 / 2009-10-19 18:21 UTC

  The major feature of this release is the new DeferredResponse
  middleware for the Rev-based concurrency model.  It should be
  transparently compatible with non-Rev models, as well.  As a
  pleasant side effect, this change also allows large files to be
  streamed to the client with Rev as the socket becomes writable
  instead of slurping the entire file into an IO::Buffer first.

  Bugfixes to graceful shutdowns support for all concurrency
  models.  The Rev-based model also gets a working heartbeat
  mechanism (oops!) and fixed HTTP/1.1 pipelining support.

  Eric Wong (38):
        app_pool: note it being currently broken with Revactor
        Revactor tests can sleep more easily
        tests: sleep.ru handles "Expect: 100-continue"
        Fix graceful shutdown handling of Thread* models harder
        DRY setting of rack.multithread
        test-lib: dbgcat adds headers with key name
        use timeout correctly to join threads on SIGQUIT
        Rev: simplification to error handling
        tests: sleep.ru slurps rack.input stream
        refactor graceful shutdowns again, harder
        tests: introduce require_for_model function
        tests: add unbuffered tee(1)-like helper
        tests: rack.input trailer tests for all models
        tests: fix issues with non-portable shell constructs
        tests: fix random_blob dependency
        tests: factor out a common parser error "library"
        tests: DRY setting of the "model" environment var
        tests: DRY Ruby requires based on model
        test-lib: quiet down pipefail error message
        tests: DRY require tests for Rev/Revactor
        rev: handle fully-buffered, pipelined requests
        rev: avoid stack overflow through pipelining
        tests: common basic HTTP tests for all models
        tests: rack.input hammer concurrency testing
        tests: for log reopening for all concurrency models
        http_response: filter out X-Rainbows-* headers
        rev: fix heartbeat timeouts
        revactor: switch to a 1 second heartbeat
        rev: async response bodies with DevFdResponse middleware
        tests: more reliable error checking
        tests: DWIM FIFO creation
        tests: predictable and simpler tempfile management
        rev: AsyncResponse => DeferredResponse API cleanup
        rev: update documentation for this model
        TUNING: update documentation notes
        TODO: update with new items
        local.mk.sample: sync with BDFL's version
        Rainbows! 0.3.0

=== Rainbows! 0.2.0 / 2009-10-15 08:01 UTC

  This release adds preliminary Rev support for network
  concurrency under Ruby 1.8 and Ruby 1.9.  There are caveats to
  this model and reading the RDoc for Rainbows::Rev is
  recommended.

  Rainbows::AppPool Rack middleware is now available to limit
  application concurrency on a per-process basis independently of
  network concurrency.  See the RDoc for this class for further
  details.

  Per-client timeouts have been removed, see
  http://mid.gmane.org/20091013062602.GA13128@dcvr.yhbt.net
  for the reasoning.

  Rack environment changes:

  * "rack.multithread" is now only true for models with "Thread"
    in their name.  Enabling thread-safe (but not reentrant) code
    may actually be harmful for Revactor.

  * "rainbows.model" is now exposed so the application can easily
    figure out which network concurrency model is in use.

  Bugfixes include better shutdown and error handling for all
  existing models, OpenBSD compatibility for the per-process
  heartbeat (same as found in unicorn v0.93.3).

  Eric Wong (54):
        add SIGNALS doc to RDoc
        SIGNALS: add Rainbows!-specific notes
        doc: better "Rainbows!" RDoc examples and linkage
        tests: generate random_blob once for all tests
        tests: move trash files to their own trash/ directory
        t0000: basic test includes keepalive + pipelining
        tests: simplify temporary file management
        tests: add dbgcat() utility method
        fchmod heartbeat flips between 0/1
        tests: add revactor pipelining/keepalive test
        thread_spawn: trap EAGAIN on accept_nonblock
        thread_spawn: more robust loop
        thread_spawn: non-blocking accept() shouldn't EINTR
        tests: enable pipefail shell option if possible
        README for test suite
        tests: TEST_OPTS => SH_TEST_OPTS
        tests: update TRACER examples in makefile
        tests: create a bad exit code by default
        thread_spawn: clean up nuking of timed-out threads
        factor out common listen loop error handling
        graceful exit on trap TypeError from IO.select
        expand and share init_worker_process
        revactor: break on EBADF in the accepting actors
        revactor: cleanups and remove redundancy
        No need to be halving timeout, already done for us
        revactor: graceful death of keepalive clients
        revactor: continue fchmod beat in graceful exit
        cleanup thread models, threads no longer time out
        revactor: fix graceful shutdown timeouts
        Fix graceful shutdowns for threaded models
        SIGINT/SIGTERM shuts down instantly in workers
        tests: check for common exceptions with "Error"
        DEPLOY: update with notes on DoS potential
        tests: add reopen logs test for revactor
        vs Unicorn: use diagrams for concurrency models
        vs Unicorn: fix wording to be consistent with diagrams
        vs Unicorn: fix copy+paste errors and grammar fail
        README: alter reply conventions for the mailing list
        preliminary Rev support
        local.mk.sample: use ksh93 as default $(SHELL)
        rack.multithread is only true for Thread* models
        Rev: general module documentation + caveats
        Rev: fix error handling for parser errors
        t3003: set executable bit
        documentation updates (mostly on network models)
        rack: expose "rainbows.model" in Rack environment
        tests: enforce rack.multithread and rainbows.model
        README: update URLs
        README: update with Rev model caveats
        Add Rainbows::AppPool Rack middleware
        t4003: chmod +x
        local.mk.sample: use rev 0.3.1 instead
        README: link to AppPool and extra note about Rev model
        Rainbows! 0.2.0

=== Rainbows! v0.1.1 / 2009-10-06 03:51 UTC

  Fixed Ruby 1.8 support (and all 1.9 systems without Revactor).
  Process-wide timeout handling for the ThreadSpawn concurrency
  model should now work properly.  Small cleanups everywhere.

  Eric Wong (16):
        Rakefile: add publish_news target
        Fix NEWS generation on single-paragraph tag messages
        README: move RDoc links down to fix gem description
        README: add install instructions
        summary: s/slow apps/sleepy apps/g
        Avoid naming names in LICENSE/README files
        rainbows/base: cleanup constant include
        tests: quiet down bin installation
        Add top-level "test" target for make
        local.mk.sample: sync to my current version
        tests: allow "make V=2" to set TEST_OPTS += -x
        cleanup temporary file usage in tests
        local.mk.sample: fix revactor dependency
        Thread* models: cleanup timeout management
        thread_spawn: fix timeout leading to worker death
        less error-prone timeouts for Thread models

=== Rainbows! 0.1.0 / 2009-10-05 10:44 UTC

  Initial release

  This release is currently highly experimental and is still
  missing a lot of test coverage.

rainbows-5.0.0/Static_Files0000644000004100000410000000505512641135250015670 0ustar  www-datawww-data= Static file serving with \Rainbows!

While Ruby application servers aren't traditionally used to serve static
files, it'll be fun for us to see how far we can go with \Rainbows!

We aren't delusional enough (yet :) to compete with C-based servers like
nginx or lighttpd in terms of raw performance, but wouldn't it be nice
to simplify your deployments and only deploy one server?

== {sendfile}[http://rubygems.org/gems/sendfile] RubyGem

To enable the "sendfile" gem, just make sure you have 1.1.0 or later and
"require" it in your \Rainbows!/unicorn config file (not your Rack
config.ru):

    require 'sendfile' # that's it! nothing else to do

    # the rest of your Rainbows! config goes below:
    worker_processes 4
    stderr_path "/var/log/app/rainbows.err.log"
    Rainbows! do
      use :RevFiberSpawn
      worker_connections 100
    end

The sendfile gem works for all of our concurrency models except
NeverBlock and EventMachine (see below).

The sendfile gem is less buggy than current (Ruby 1.9.2)
IO.copy_stream and supports FreeBSD and Solaris in addition to Linux.
This RubyGem also works under Ruby 1.8 (even with threads) and should
work with rubinius.git, too.

\Rainbows! supports the sendfile gem since v0.95.0

== IO.copy_stream (Ruby 1.9 only)

Users of pure-Ruby Thread-based models ThreadPool, ThreadSpawn, and
their Writer* variants use the core IO.copy_stream method under Ruby
1.9.  IO.copy_stream uses sendfile() under Linux, and a pread()/write()
loop (implemented in C) on other systems.
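
For reference, the core method may be exercised on its own like this (a
standalone illustration, not \Rainbows! internals; "client_socket"
stands in for an already-connected socket):

    # copy 500 bytes starting at offset 100 from a static file;
    # with a File source under Linux, Ruby 1.9 uses sendfile() here
    File.open("/path/to/static/file") do |src|
      IO.copy_stream(src, client_socket, 500, 100)
    end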

IO.copy_stream under Linux with Ruby 1.9.2 (and before) is also
subject to hanging indefinitely when a client disconnects prematurely.
This issue is fixed in Ruby trunk (r28557, July 2010).

\Rainbows! supports IO.copy_stream since v0.93.0

== EventMachine FileStreamer

EventMachine and NeverBlock users automatically take advantage of the
mmap()-based FileStreamer class distributed with EventMachine.
Unfortunately, as of EventMachine 0.12.10, FileStreamer cannot easily
support HTTP Range responses.

\Rainbows! supports EventMachine FileStreamer since v0.4.0

== Performance

With large files and high-throughput clients, there should be little
performance difference compared to optimal C implementations such as
nginx and lighttpd.  Ruby runtime overhead matters more when serving
slower clients and smaller files.

== The Future...

We'll also support an open file cache (similar to nginx) which
allows us to reuse open file descriptors.

Under Linux, we'll support the splice(2) system call for zero-copy
proxying via {io_splice}[http://bogomips.org/ruby_io_splice/], too.
rainbows-5.0.0/lib/0000755000004100000410000000000012641135250014135 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/0000755000004100000410000000000012641135250015761 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/actor_spawn.rb0000644000004100000410000000205412641135250020627 0ustar  www-datawww-data# -*- encoding: binary -*-

require 'actor'

# Actor concurrency model for Rubinius.  We can't seem to get message
# passing working right, so we're throwing a Mutex into the mix for
# now.  Hopefully somebody can fix things for us.  Currently, this is
# exactly the same as the ThreadSpawn model since we don't use the
# message passing capabilities of the Actor model (and even then
# it wouldn't really make sense since Actors in Rubinius are just
# Threads underneath and our ThreadSpawn model is one layer of
# complexity less).
#
# This is different from the Revactor one which is not prone to race
# conditions within the same process at all (since it uses Fibers).
module Rainbows::ActorSpawn
  include Rainbows::ThreadSpawn

  # runs inside each forked worker, this sits around and waits
  # for connections and doesn't die until the parent dies (or is
  # given a INT, QUIT, or TERM signal)
  def worker_loop(worker) # :nodoc:
    Rainbows::Const::RACK_DEFAULTS["rack.multithread"] = true # :(
    init_worker_process(worker)
    accept_loop(Actor)
  end
end
rainbows-5.0.0/lib/rainbows/xepoll_thread_spawn/0000755000004100000410000000000012641135250022023 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/xepoll_thread_spawn/client.rb0000644000004100000410000000523612641135250023634 0ustar  www-datawww-data# -*- encoding: binary -*-
# :stopdoc:
module Rainbows::XEpollThreadSpawn::Client
  Rainbows.config!(self, :keepalive_timeout, :client_header_buffer_size)
  N = Raindrops.new(1)
  ACCEPTORS = Rainbows::HttpServer::LISTENERS.dup
  extend Rainbows::WorkerYield

  def self.included(klass) # included in Rainbows::Client
    max = Rainbows.server.worker_connections
    ACCEPTORS.map! do |sock|
      Thread.new do
        buf = ""
        begin
          if io = sock.kgio_accept(klass)
            N.incr(0, 1)
            io.epoll_once(buf)
          end
          worker_yield while N[0] >= max
        rescue => e
          Rainbows::Error.listen_loop(e)
        end while Rainbows.alive
      end
    end
  end

  ep = SleepyPenguin::Epoll
  EP = ep.new
  IN = ep::IN | ep::ONESHOT
  KATO = {}.compare_by_identity
  LOCK = Mutex.new
  Rainbows.at_quit do
    clients = nil
    LOCK.synchronize { clients = KATO.keys; KATO.clear }
    clients.each { |io| io.closed? or io.shutdown }
  end
  @@last_expire = Rainbows.now

  def kato_set
    LOCK.synchronize { KATO[self] = @@last_expire }
    EP.set(self, IN)
  end

  def kato_delete
    LOCK.synchronize { KATO.delete self }
  end

  def self.loop
    buf = ""
    begin
      EP.wait(nil, 1000) { |_, obj| obj.epoll_run(buf) }
      expire
    rescue Errno::EINTR
    rescue => e
      Rainbows::Error.listen_loop(e)
    end while Rainbows.tick || N[0] > 0
    Rainbows::JoinThreads.acceptors(ACCEPTORS)
  end

  def self.expire
    return if ((now = Rainbows.now) - @@last_expire) < 1.0
    if (ot = KEEPALIVE_TIMEOUT) >= 0
      ot = now - ot
      defer = []
      LOCK.synchronize do
        KATO.delete_if { |client, time| time < ot and defer << client }
      end
      defer.each { |io| io.closed? or io.close }
    end
    @@last_expire = now
  end

  def epoll_once(buf)
    @hp = Rainbows::HttpParser.new
    epoll_run(buf)
  end

  def close
    super
    kato_delete
    N.decr(0, 1)
    nil
  end

  def handle_error(e)
    super
    ensure
      closed? or close
  end

  def epoll_run(buf)
    case kgio_tryread(CLIENT_HEADER_BUFFER_SIZE, buf)
    when :wait_readable
      return kato_set
    when String
      kato_delete
      env = @hp.add_parse(buf) and return spawn(env, @hp)
    else
      return close
    end while true
    rescue => e
      handle_error(e)
  end

  def spawn(env, hp)
    Thread.new { process_pipeline(env, hp) }
  end

  def pipeline_ready(hp)
    hp.parse and return true
    case buf = kgio_tryread(CLIENT_HEADER_BUFFER_SIZE)
    when :wait_readable
      kato_set
      return false
    when String
      hp.add_parse(buf) and return true
      # continue loop
    else
      return close
    end while true
  end
end
rainbows-5.0.0/lib/rainbows/never_block/0000755000004100000410000000000012641135250020252 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/never_block/event_machine.rb0000644000004100000410000000036712641135250023412 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::NeverBlock::Client < Rainbows::EventMachine::Client
  def app_call input
    POOL.spawn do
      begin
        super input
      rescue => e
        handle_error(e)
      end
    end
  end
end
rainbows-5.0.0/lib/rainbows/never_block/core.rb0000644000004100000410000000102612641135250021526 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::NeverBlock::Core
  def init_worker_process(worker)
    super
    o = Rainbows::O
    pool = NB::Pool::FiberPool.new(o[:pool_size])
    base = o[:backend].to_s.gsub!(/([a-z])([A-Z])/, '\1_\2').downcase!
    require "rainbows/never_block/#{base}"
    client_class = Rainbows::NeverBlock::Client
    client_class.superclass.const_set(:APP, Rainbows.server.app)
    client_class.const_set(:POOL, pool)
    logger.info "NeverBlock/#{o[:backend]} pool_size=#{o[:pool_size]}"
  end
end
rainbows-5.0.0/lib/rainbows/max_body/0000755000004100000410000000000012641135250017563 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/max_body/wrapper.rb0000644000004100000410000000305712641135250021575 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
#
# This is only used for chunked request bodies, which are rare
class Rainbows::MaxBody::Wrapper
  def initialize(rack_input, limit)
    @input, @limit, @rbuf = rack_input, limit, ''
  end

  def each
    while line = gets
      yield line
    end
  end

  # chunked encoding means this method behaves more like readpartial,
  # since Rack does not support a method named "readpartial"
  def read(length = nil, rv = '')
    if length
      if length <= @rbuf.size
        length < 0 and raise ArgumentError, "negative length #{length} given"
        rv.replace(@rbuf.slice!(0, length))
      elsif @rbuf.empty?
        checked_read(length, rv) or return
      else
        rv.replace(@rbuf.slice!(0, @rbuf.size))
      end
      rv.empty? && length != 0 ? nil : rv
    else
      rv.replace(read_all)
    end
  end

  def gets
    sep = $/
    if sep.nil?
      rv = read_all
      return rv.empty? ? nil : rv
    end
    re = /\A(.*?#{Regexp.escape(sep)})/

    begin
      @rbuf.sub!(re, '') and return $1

      if tmp = checked_read(16384)
        @rbuf << tmp
      elsif @rbuf.empty? # EOF
        return nil
      else # EOF, return whatever is left
        return @rbuf.slice!(0, @rbuf.size)
      end
    end while true
  end

  def checked_read(length = 16384, buf = '')
    if @input.read(length, buf)
      throw :rainbows_EFBIG if ((@limit -= buf.size) < 0)
      return buf
    end
  end

  def read_all
    rv = @rbuf.slice!(0, @rbuf.size)
    tmp = ''
    while checked_read(16384, tmp)
      rv << tmp
    end
    rv
  end
end
rainbows-5.0.0/lib/rainbows/max_body/rewindable_wrapper.rb0000644000004100000410000000045612641135250023771 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::MaxBody::RewindableWrapper < Rainbows::MaxBody::Wrapper
  def initialize(rack_input, limit)
    @orig_limit = limit
    super
  end

  def rewind
    @limit = @orig_limit
    @rbuf = ''
    @input.rewind
  end

  def size
    @input.size
  end
end
rainbows-5.0.0/lib/rainbows/rev_fiber_spawn.rb0000644000004100000410000000115612641135250021464 0ustar  www-datawww-data# -*- encoding: binary -*-
Rainbows.const_set(:RevFiberSpawn, Rainbows::CoolioFiberSpawn)

# CoolioFiberSpawn is the new version of this, use that instead.
#
# A combination of the Rev and FiberSpawn models.  This allows Ruby
# 1.9 Fiber-based concurrency for application processing while
# exposing a synchronous execution model and using scalable network
# concurrency provided by Rev.  A streaming "rack.input" is exposed.
# Applications are strongly advised to wrap all slow IO objects
# (sockets, pipes) using Rainbows::Fiber::IO or a Rev-compatible
# class whenever possible.
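#
# For example (a hedged sketch; the socket itself is hypothetical):
#
#   socket = TCPSocket.new('example.com', 80)
#   io = Rainbows::Fiber::IO.new(socket) # wrap before blocking reads/writes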
module Rainbows::RevFiberSpawn; end
rainbows-5.0.0/lib/rainbows/writer_thread_spawn.rb0000644000004100000410000000251212641135250022361 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'thread'
# This concurrency model implements a single-threaded app dispatch and
# spawns a new thread for writing responses.  This concurrency model
# should be ideal for apps that serve large responses or stream
# responses slowly.
#
# Unlike most \Rainbows! concurrency models, WriterThreadSpawn is
# designed to run behind nginx just like Unicorn is.  This concurrency
# model may be useful for existing Unicorn users looking for more
# output concurrency than socket buffers can provide while still
# maintaining a single-threaded application dispatch (though if the
# response body is generated on-the-fly, it must be thread safe).
#
# For serving large or streaming responses, setting
# "proxy_buffering off" in nginx is recommended.  If your application
# does not handle uploads, then using any HTTP-aware proxy like
# haproxy is fine.  Using a non-HTTP-aware proxy will leave you
# vulnerable to slow client denial-of-service attacks.
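#
# === Example configuration
#
# A minimal sketch; the connection count is illustrative only:
#
#   Rainbows! do
#     use :WriterThreadSpawn
#     worker_connections 100
#   end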

module Rainbows::WriterThreadSpawn
  include Rainbows::Base
  autoload :Client, 'rainbows/writer_thread_spawn/client'

  def process_client(client) # :nodoc:
    Client.new(client).process_loop
  end

  def worker_loop(worker)  # :nodoc:
    Client.const_set(:MAX, worker_connections)
    super # accept loop from Unicorn
    Client.quit
  end
  # :startdoc:
end
# :enddoc:
rainbows-5.0.0/lib/rainbows/event_machine.rb0000644000004100000410000001034412641135250021115 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'eventmachine'
EM::VERSION >= '0.12.10' or abort 'eventmachine 0.12.10 is required'

# Implements a basic single-threaded event model with
# {EventMachine}[http://rubyeventmachine.com/].  It is capable of
# handling thousands of simultaneous client connections, but with only
# a single-threaded app dispatch.  It is suited for slow clients,
# and can work with slow applications via asynchronous libraries such as
# {async_sinatra}[http://github.com/raggi/async_sinatra],
# {Cramp}[http://cramp.in/],
# and {rack-fiber_pool}[http://github.com/mperham/rack-fiber_pool].
#
# It does not require your Rack application to be thread-safe,
# reentrancy is only required for the DevFdResponse body
# generator.
#
# Compatibility: Whatever \EventMachine ~> 0.12.10 and Unicorn both
# support, currently Ruby 1.8/1.9.
#
# This model is compatible with users of "async.callback" in the Rack
# environment such as
# {async_sinatra}[http://github.com/raggi/async_sinatra].
#
# For a complete asynchronous framework,
# {Cramp}[http://cramp.in/] is fully
# supported when using this concurrency model.
#
# This model is fully-compatible with
# {rack-fiber_pool}[http://github.com/mperham/rack-fiber_pool]
# which allows each request to run inside its own \Fiber after
# all request processing is complete.
#
# Merb (and other frameworks/apps) supporting +deferred?+ execution are
# also supported, as documented at Rainbows::EventMachine::TryDefer.
#
# This model does not implement a streaming "rack.input" which allows
# the Rack application to process data as it arrives.  This means
# "rack.input" will be fully buffered in memory or to a temporary file
# before the application is entered.
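#
# === Example configuration
#
# A minimal sketch; the connection count is illustrative only:
#
#   Rainbows! do
#     use :EventMachine
#     worker_connections 1024
#   end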
#
# === RubyGem Requirements
#
# * eventmachine 0.12.10
module Rainbows::EventMachine
  autoload :ResponsePipe, 'rainbows/event_machine/response_pipe'
  autoload :ResponseChunkPipe, 'rainbows/event_machine/response_chunk_pipe'
  autoload :TryDefer, 'rainbows/event_machine/try_defer'
  autoload :Client, 'rainbows/event_machine/client'

  include Rainbows::Base

  # Cramp (and possibly others) can subclass Rainbows::EventMachine::Client
  # and provide the :em_client_class option.  We /don't/ want to load
  # Rainbows::EventMachine::Client in the master process since we need
  # reloadability.
  def em_client_class
    case klass = Rainbows::O[:em_client_class]
    when Proc
      klass.call # e.g.: proc { Cramp::WebSocket::Rainbows }
    when Symbol, String
      eval(klass.to_s) # Object.const_get won't resolve multi-level paths
    else # @use should be either :EventMachine or :NeverBlock
      Rainbows.const_get(@use).const_get(:Client)
    end
  end

  # runs inside each forked worker, this sits around and waits
  # for connections and doesn't die until the parent dies (or is
  # given a INT, QUIT, or TERM signal)
  def worker_loop(worker) # :nodoc:
    init_worker_process(worker)
    server = Rainbows.server
    server.app.respond_to?(:deferred?) and
      server.app = TryDefer.new(server.app)

    # enable them both, should be non-fatal if not supported
    EM.epoll
    EM.kqueue
    logger.info "#@use: epoll=#{EM.epoll?} kqueue=#{EM.kqueue?}"
    client_class = em_client_class
    max = worker_connections + LISTENERS.size
    Rainbows::EventMachine::Server.const_set(:MAX, max)
    Rainbows::EventMachine::Server.const_set(:CL, client_class)
    Rainbows::EventMachine::Client.const_set(:APP, Rainbows.server.app)
    EM.run {
      conns = EM.instance_variable_get(:@conns) or
        raise RuntimeError, "EM @conns instance variable not accessible!"
      Rainbows::EventMachine::Server.const_set(:CUR, conns)
      Rainbows.at_quit do
        EM.next_tick do
          LISTENERS.clear
          conns.each_value do |c|
            case c
            when client_class
              c.quit
            when Rainbows::EventMachine::Server
              c.detach
            end
          end
        end
      end
      EM.add_periodic_timer(1) do
        EM.stop if ! Rainbows.tick && conns.empty? && EM.reactor_running?
      end
      LISTENERS.map! do |s|
        EM.watch(s, Rainbows::EventMachine::Server) do |c|
          c.notify_readable = true
        end
      end
    }
    EM.reactor_thread.join if EM.reactor_running?
  end
end
# :enddoc:
require 'rainbows/event_machine/server'
rainbows-5.0.0/lib/rainbows/const.rb0000644000004100000410000000066412641135250017442 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::Const
end
require 'rainbows/version'
module Rainbows::Const
  include Unicorn::Const

  RACK_DEFAULTS = Unicorn::HttpRequest::DEFAULTS.update({
    "SERVER_SOFTWARE" => "Rainbows! #{RAINBOWS_VERSION}",

    # using the Rev model, we'll automatically chunk pipe and socket objects
    # if they're the response body.  Unset by default.
    # "rainbows.autochunk" => false,
  })
end
rainbows-5.0.0/lib/rainbows/never_block.rb0000644000004100000410000000227212641135250020602 0ustar  www-datawww-data# -*- encoding: binary -*-

# Concurrency model using the
# {NeverBlock}[http://www.espace.com.eg/neverblock/] library, which combines
# the EventMachine library with Ruby Fibers.  This includes use of
# Thread-based Fibers under Ruby 1.8.  It currently does NOT support
# a streaming "rack.input" but is compatible with everything else
# EventMachine supports.
#
# === :pool_size vs worker_connections
#
# In your Rainbows! config block, you may specify a Fiber pool size
# to limit your application concurrency (without using Rainbows::AppPool)
# independently of worker_connections.
#
#   Rainbows! do
#     use :NeverBlock, :pool_size => 50
#     worker_connections 100
#   end
#
module Rainbows::NeverBlock
  # :stopdoc:
  extend Rainbows::PoolSize

  # same pool size NB core itself uses
  def self.setup # :nodoc:
    super
    Rainbows::O[:backend] ||= :EventMachine # no Cool.io support, yet
    Rainbows.const_get(Rainbows::O[:backend])
    require "never_block" # require EM first since we need a higher version
  end

  def self.extended(klass)
    klass.extend(Rainbows.const_get(Rainbows::O[:backend])) # EventMachine
    klass.extend(Rainbows::NeverBlock::Core)
  end
  # :startdoc:
end
# :enddoc:
require 'rainbows/never_block/core'
rainbows-5.0.0/lib/rainbows/process_client.rb0000644000004100000410000000505512641135250021327 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::ProcessClient
  include Rainbows::Response
  include Rainbows::Const

  NULL_IO = Unicorn::HttpRequest::NULL_IO
  IC = Unicorn::HttpRequest.input_class
  Rainbows.config!(self, :client_header_buffer_size, :keepalive_timeout)

  def read_expire
    Rainbows.now + KEEPALIVE_TIMEOUT
  end

  # used for reading headers (respecting keepalive_timeout)
  def timed_read(buf)
    expire = nil
    begin
      case rv = kgio_tryread(CLIENT_HEADER_BUFFER_SIZE, buf)
      when :wait_readable
        return if expire && expire < Rainbows.now
        expire ||= read_expire
        kgio_wait_readable(KEEPALIVE_TIMEOUT)
      else
        return rv
      end
    end while true
  end

  def process_loop
    @hp = hp = Rainbows::HttpParser.new
    kgio_read!(CLIENT_HEADER_BUFFER_SIZE, buf = hp.buf) or return

    begin # loop
      until env = hp.parse
        timed_read(buf2 ||= "") or return
        buf << buf2
      end

      set_input(env, hp)
      env['REMOTE_ADDR'] = kgio_addr
      hp.hijack_setup(to_io)
      status, headers, body = APP.call(env.merge!(RACK_DEFAULTS))

      if 100 == status.to_i
        write("HTTP/1.1 100 Continue\r\n\r\n".freeze)
        env.delete('HTTP_EXPECT'.freeze)
        status, headers, body = APP.call(env)
      end
      return if hp.hijacked?
      write_response(status, headers, body, alive = hp.next?) or return
    end while alive
  # if we get any error, try to write something back to the client
  # assuming we haven't closed the socket, but don't get hung up
  # if the socket is already closed or broken.  We'll always ensure
  # the socket is closed at the end of this function
  rescue => e
    handle_error(e)
  ensure
    close unless closed? || hp.hijacked?
  end

  def handle_error(e)
    Rainbows::Error.write(self, e)
  end

  def set_input(env, hp)
    env['rack.input'] = 0 == hp.content_length ? NULL_IO : IC.new(self, hp)
  end

  def process_pipeline(env, hp)
    begin
      set_input(env, hp)
      env['REMOTE_ADDR'] = kgio_addr
      hp.hijack_setup(to_io)
      status, headers, body = APP.call(env.merge!(RACK_DEFAULTS))
      if 100 == status.to_i
        write("HTTP/1.1 100 Continue\r\n\r\n".freeze)
        env.delete('HTTP_EXPECT'.freeze)
        status, headers, body = APP.call(env)
      end
      return if hp.hijacked?
      write_response(status, headers, body, alive = hp.next?) or return
    end while alive && pipeline_ready(hp)
    alive or close
    rescue => e
      handle_error(e)
  end

  # override this in subclass/module
  def pipeline_ready(hp)
  end
end
rainbows-5.0.0/lib/rainbows/coolio/0000755000004100000410000000000012641135250017245 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/coolio/response_chunk_pipe.rb0000644000004100000410000000056712641135250023645 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
#
# this class is specific to Coolio, for proxying IO-derived objects
class Rainbows::Coolio::ResponseChunkPipe < Rainbows::Coolio::ResponsePipe
  def on_read(data)
    @client.write("#{data.size.to_s(16)}\r\n")
    @client.write(data)
    @client.write("\r\n")
  end

  def on_close
    @client.write("0\r\n\r\n")
    super
  end
end
rainbows-5.0.0/lib/rainbows/coolio/master.rb0000644000004100000410000000074112641135250021067 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
require 'thread'
class Rainbows::Coolio::Master < Coolio::IOWatcher

  def initialize(queue)
    @reader, @writer = Kgio::Pipe.new
    super(@reader)
    @queue = queue
    @wbuf, @rbuf = "\0", "\0"
  end

  def <<(output)
    @queue << output
    @writer.kgio_trywrite(@wbuf)
  end

  def on_readable
    if String === @reader.kgio_tryread(1, @rbuf)
      client, response = @queue.pop
      client.response_write(response)
    end
  end
end
rainbows-5.0.0/lib/rainbows/coolio/client.rb0000644000004100000410000001323312641135250021052 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::Coolio::Client < Coolio::IO
  include Rainbows::EvCore
  APP = Rainbows.server.app
  CONN = Rainbows::Coolio::CONN
  KATO = Rainbows::Coolio::KATO
  LOOP = Coolio::Loop.default

  def initialize(io)
    CONN[self] = false
    super(io)
    post_init
    @deferred = nil
  end

  def want_more
    enable unless enabled?
  end

  def quit
    super
    close if nil == @deferred && @_write_buffer.empty?
  end

  # override the Coolio::IO#write method try to write directly to the
  # kernel socket buffers to avoid an extra userspace copy if
  # possible.
  def write(buf)
    if @_write_buffer.empty?
      begin
        case rv = @_io.kgio_trywrite(buf)
        when nil
          return enable_write_watcher
        when :wait_writable
          break # fall through to super(buf)
        when String
          buf = rv # retry, skb could grow or been drained
        end
      rescue => e
        return handle_error(e)
      end while true
    end
    super(buf)
  end

  def on_readable
    buf = @_io.kgio_tryread(CLIENT_HEADER_BUFFER_SIZE, RBUF)
    case buf
    when :wait_readable
    when nil # eof
      close
    else
      on_read buf
    end
  rescue Errno::ECONNRESET
    close
  end

  # allows enabling of write watcher even when read watcher is disabled
  def evloop
    LOOP
  end

  def next!
    attached? or return
    @deferred = nil
    enable_write_watcher # trigger on_write_complete
  end

  def timeout?
    if nil == @deferred && @_write_buffer.empty?
      @_io.shutdown
      true
    else
      false
    end
  end

  # used for streaming sockets and pipes
  def stream_response_body(body, io, chunk)
    # we only want to attach to the Coolio::Loop belonging to the
    # main thread in Ruby 1.9
    (chunk ? Rainbows::Coolio::ResponseChunkPipe :
             Rainbows::Coolio::ResponsePipe).new(io, self, body).attach(LOOP)
    @deferred = true
  end

  def hijacked
    CONN.delete(self)
    detach
    nil
  end

  def write_response_path(status, headers, body, alive)
    io = body_to_io(body)
    st = io.stat

    if st.file?
      defer_file(status, headers, body, alive, io, st)
    elsif st.socket? || st.pipe?
      chunk = stream_response_headers(status, headers, alive, body)
      return hijacked if nil == chunk
      stream_response_body(body, io, chunk)
    else
      # char or block device... WTF?
      write_response(status, headers, body, alive)
    end
  end

  def ev_write_response(status, headers, body, alive)
    if body.respond_to?(:to_path)
      body = write_response_path(status, headers, body, alive)
    else
      body = write_response(status, headers, body, alive)
    end
    return hijacked unless body
    return quit unless alive && :close != @state
    @state = :headers
  end

  def app_call input
    KATO.delete(self)
    disable if enabled?
    @env['rack.input'] = input
    @env['REMOTE_ADDR'] = @_io.kgio_addr
    @env['async.callback'] = method(:write_async_response)
    @hp.hijack_setup(@_io)
    status, headers, body = catch(:async) {
      APP.call(@env.merge!(RACK_DEFAULTS))
    }
    return hijacked if @hp.hijacked?

    (nil == status || -1 == status) ? @deferred = true :
        ev_write_response(status, headers, body, @hp.next?)
  end

  def on_write_complete
    case @deferred
    when true then return # #next! will clear this bit
    when nil # fall through
    else
      return if stream_file_chunk(@deferred)
      close_deferred # EOF, fall through
    end

    case @state
    when :close
      close if @_write_buffer.empty?
    when :headers
      if @buf.empty?
        buf = @_io.kgio_tryread(CLIENT_HEADER_BUFFER_SIZE, RBUF) or return close
        String === buf and return on_read(buf)
        # buf == :wait_readable
        unless enabled?
          enable
          KATO[self] = Rainbows.now
        end
      else
        on_read(''.freeze)
      end
    end
    rescue => e
      handle_error(e)
  end

  def handle_error(e)
    close_deferred
    if msg = Rainbows::Error.response(e)
      @_io.kgio_trywrite(msg) rescue nil
    end
    @_write_buffer.clear
    ensure
      quit
  end

  def close_deferred
    if @deferred
      begin
        @deferred.close if @deferred.respond_to?(:close)
      rescue => e
        Unicorn.log_error(Rainbows.server.logger,
                          "closing deferred=#{@deferred.inspect}", e)
      end
      @deferred = nil
    end
  end

  def on_close
    close_deferred
    CONN.delete(self)
    KATO.delete(self)
  end

  if IO.method_defined?(:trysendfile)
    def defer_file(status, headers, body, alive, io, st)
      if r = sendfile_range(status, headers)
        status, headers, range = r
        body = write_headers(status, headers, alive, body) or return hijacked
        range and defer_file_stream(range[0], range[1], io, body)
      else
        write_headers(status, headers, alive, body) or return hijacked
        defer_file_stream(0, st.size, io, body)
      end
      body
    end

    def stream_file_chunk(sf) # +sf+ is a Rainbows::StreamFile object
      case n = @_io.trysendfile(sf, sf.offset, sf.count)
      when Integer
        sf.offset += n
        return if 0 == (sf.count -= n)
      when :wait_writable
        return enable_write_watcher
      else
        return
      end while true
    end
  else
    def defer_file(status, headers, body, alive, io, st)
      write_headers(status, headers, alive, body) or return hijacked
      defer_file_stream(0, st.size, io, body)
      body
    end

    def stream_file_chunk(body)
      buf = body.to_io.read(0x4000) and write(buf)
    end
  end

  def defer_file_stream(offset, count, io, body)
    @deferred = Rainbows::StreamFile.new(offset, count, io, body)
    enable_write_watcher
  end
end
rainbows-5.0.0/lib/rainbows/coolio/core.rb0000644000004100000410000000132012641135250020516 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::Coolio::Core
  include Rainbows::Base

  # runs inside each forked worker, this sits around and waits
  # for connections and doesn't die until the parent dies (or is
  # given a INT, QUIT, or TERM signal)
  def worker_loop(worker)
    init_worker_process(worker)
    mod = Rainbows.const_get(@use)
    rloop = Rainbows::Coolio::Server.const_set(:LOOP, Coolio::Loop.default)
    Rainbows::Coolio::Server.const_set(:MAX, @worker_connections)
    Rainbows::Coolio::Server.const_set(:CL, mod.const_get(:Client))
    Rainbows::Coolio::Heartbeat.new(1, true).attach(rloop)
    LISTENERS.map! { |s| Rainbows::Coolio::Server.new(s).attach(rloop) }
    rloop.run
  end
end
rainbows-5.0.0/lib/rainbows/coolio/heartbeat.rb0000644000004100000410000000133312641135250021531 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# This class handles the Unicorn fchmod heartbeat mechanism
# in Coolio-based concurrency models to prevent the master
# process from killing us unless we're blocked.  This class
# will also detect and execute the graceful exit if triggered
# by SIGQUIT
class Rainbows::Coolio::Heartbeat < Coolio::TimerWatcher
  KATO = Rainbows::Coolio::KATO
  CONN = Rainbows::Coolio::CONN
  Rainbows.config!(self, :keepalive_timeout)
  Rainbows.at_quit { KATO.each_key(&:timeout?).clear }

  def on_timer
    if (ot = KEEPALIVE_TIMEOUT) >= 0
      ot = Rainbows.now - ot
      KATO.delete_if { |client, time| time < ot and client.timeout? }
    end
    exit if (! Rainbows.tick && CONN.size <= 0)
  end
end
rainbows-5.0.0/lib/rainbows/coolio/response_pipe.rb0000644000004100000410000000064612641135250022453 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
#
# this class is specific to Coolio, for writing large static files
# or proxying IO-derived objects
class Rainbows::Coolio::ResponsePipe < Coolio::IO
  def initialize(io, client, body)
    super(io)
    @client, @body = client, body
  end

  def on_read(data)
    @client.write(data)
  end

  def on_close
    @body.respond_to?(:close) and @body.close
    @client.next!
  end
end
rainbows-5.0.0/lib/rainbows/coolio/thread_client.rb0000644000004100000410000000174712641135250022410 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:

RUBY_VERSION =~ %r{\A1\.8} and
  warn "Coolio and Threads do not mix well under Ruby 1.8"

class Rainbows::Coolio::ThreadClient < Rainbows::Coolio::Client
  def app_call input
    KATO.delete(self)
    disable if enabled?
    @env['rack.input'] = input
    app_dispatch # must be implemented by subclass
  end

  # this is only called in the master thread
  def response_write(response)
    return hijacked if @hp.hijacked?
    ev_write_response(*response, @hp.next?)
    rescue => e
      handle_error(e)
  end

  # fail-safe application dispatch; we absolutely cannot
  # afford to fail or raise an exception (killing the thread)
  # here because that could cause a deadlock and we'd leak FDs
  def app_response
    begin
      @env['REMOTE_ADDR'] = @_io.kgio_addr
      @hp.hijack_setup(@_io)
      APP.call(@env.merge!(RACK_DEFAULTS))
    rescue => e
      Rainbows::Error.app(e) # we guarantee this does not raise
      [ 500, {}, [] ]
    end
  end
end
rainbows-5.0.0/lib/rainbows/coolio/server.rb0000644000004100000410000000044712641135250021105 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::Coolio::Server < Coolio::IO
  CONN = Rainbows::Coolio::CONN
  # CL and MAX will be defined in the corresponding worker loop

  def on_readable
    return if CONN.size >= MAX
    io = @_io.kgio_tryaccept and CL.new(io).attach(LOOP)
  end
end
rainbows-5.0.0/lib/rainbows/dev_fd_response.rb0000644000004100000410000000550712641135250021462 0ustar  www-datawww-data# -*- encoding: binary -*-

# Rack response middleware wrapping any IO-like object with an
# OS-level file descriptor associated with it.  May also be used to
# create responses from integer file descriptors or existing +IO+
# objects.  This may be used in conjunction with the #to_path method
# on servers that support it to pass arbitrary file descriptors into
# the HTTP response without additional open(2) syscalls.
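#
# === Example usage
#
# A hedged config.ru sketch; the command, path, and content type are
# purely illustrative:
#
#   use Rainbows::DevFdResponse
#   run lambda { |env|
#     io = IO.popen('tail -F /var/log/example.log', 'rb') # any IO-like object
#     [ 200, { 'Content-Type' => 'text/plain' }, io ]
#   }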

class Rainbows::DevFdResponse < Struct.new(:app)

  # :stopdoc:
  FD_MAP = Rainbows::FD_MAP
  include Rack::Utils

  # Rack middleware entry point, we'll just pass through responses
  # unless they respond to +to_io+ or +to_path+
  def call(env)
    status, headers, body = response = app.call(env)

    # totally uninteresting to us if there's no body
    if STATUS_WITH_NO_ENTITY_BODY.include?(status.to_i) ||
       File === body ||
       (body.respond_to?(:to_path) && File.file?(body.to_path))
      return response
    end

    io = body.to_io if body.respond_to?(:to_io)
    io ||= File.open(body.to_path) if body.respond_to?(:to_path)
    return response if io.nil?

    headers = Rack::Utils::HeaderHash.new(headers) unless Hash === headers
    st = io.stat
    fileno = io.fileno
    FD_MAP[fileno] = io
    if st.file?
      headers['Content-Length'.freeze] ||= st.size.to_s
      headers.delete('Transfer-Encoding'.freeze)
    elsif st.pipe? || st.socket? # epoll-able things
      unless headers.include?('Content-Length'.freeze)
        if env['rainbows.autochunk']
          case env['HTTP_VERSION']
          when "HTTP/1.0", nil
          else
            headers['Transfer-Encoding'.freeze] = 'chunked'
          end
        else
          env['rainbows.autochunk'] = false
        end
      end

      # we need to make sure our pipe output is Fiber-compatible
      case env['rainbows.model']
      when :FiberSpawn, :FiberPool, :RevFiberSpawn, :CoolioFiberSpawn
        io.respond_to?(:kgio_wait_readable) or
          io = Rainbows::Fiber::IO.new(io)
      when :Revactor
        io = Rainbows::Revactor::Proxy.new(io)
      end
    else # unlikely, char/block device file, directory, ...
      return response
    end
    [ status, headers, Body.new(io, "/dev/fd/#{fileno}", body) ]
  end

  class Body < Struct.new(:to_io, :to_path, :orig_body) # :nodoc:
    # called by the webserver or other middlewares if they can't
    # handle #to_path
    def each
      to_io.each { |x| yield x }
    end

    # remain Rack::Lint-compatible for people with wonky systems :P
    unless File.directory?("/dev/fd")
      alias to_path_orig to_path
      undef_method :to_path
    end

    # called by the web server after #each
    def close
      to_io.close unless to_io.closed?
      orig_body.close if orig_body.respond_to?(:close) # may not be an IO
    rescue IOError # could've been IO::new()'ed and closed
    end
  end
  #:startdoc:
end # class
rainbows-5.0.0/lib/rainbows/xepoll_thread_pool/0000755000004100000410000000000012641135250021644 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/xepoll_thread_pool/client.rb0000644000004100000410000000537212641135250023456 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# FIXME: lots of duplication from xepoll_thread_spawn/client

module Rainbows::XEpollThreadPool::Client
  Rainbows.config!(self, :keepalive_timeout, :client_header_buffer_size)
  N = Raindrops.new(1)
  ACCEPTORS = Rainbows::HttpServer::LISTENERS.dup
  extend Rainbows::WorkerYield

  def self.included(klass) # included in Rainbows::Client
    max = Rainbows.server.worker_connections
    ACCEPTORS.map! do |sock|
      Thread.new do
        buf = ""
        begin
          if io = sock.kgio_accept(klass)
            N.incr(0, 1)
            io.epoll_once(buf)
          end
          worker_yield while N[0] >= max
        rescue => e
          Rainbows::Error.listen_loop(e)
        end while Rainbows.alive
      end
    end
  end

  def self.app_run(queue)
    while client = queue.pop
      client.run
    end
  end

  QUEUE = Queue.new
  Rainbows::O[:pool_size].times { Thread.new { app_run(QUEUE) } }

  ep = SleepyPenguin::Epoll
  EP = ep.new
  IN = ep::IN | ep::ONESHOT
  KATO = {}.compare_by_identity
  LOCK = Mutex.new
  Rainbows.at_quit do
    clients = nil
    LOCK.synchronize { clients = KATO.keys; KATO.clear }
    clients.each { |io| io.closed? or io.close }
  end
  @@last_expire = Rainbows.now

  def kato_set
    LOCK.synchronize { KATO[self] = @@last_expire }
    EP.set(self, IN)
  end

  def kato_delete
    LOCK.synchronize { KATO.delete self }
  end

  def self.loop
    buf = ""
    begin
      EP.wait(nil, 1000) { |_, obj| obj.epoll_run(buf) }
      expire
    rescue Errno::EINTR
    rescue => e
      Rainbows::Error.listen_loop(e)
    end while Rainbows.tick || N[0] > 0
    Rainbows::JoinThreads.acceptors(ACCEPTORS)
  end

  def self.expire
    return if ((now = Rainbows.now) - @@last_expire) < 1.0
    if (ot = KEEPALIVE_TIMEOUT) >= 0
      ot = now - ot
      defer = []
      LOCK.synchronize do
        KATO.delete_if { |client, time| time < ot and defer << client }
      end
      defer.each { |io| io.closed? or io.shutdown }
    end
    @@last_expire = now
  end

  def epoll_once(buf)
    @hp = Rainbows::HttpParser.new
    epoll_run(buf)
  end

  def close
    super
    kato_delete
    N.decr(0, 1)
    nil
  end

  def handle_error(e)
    super
    ensure
      closed? or close
  end

  def queue!
    QUEUE << self
    false
  end

  def epoll_run(buf)
    case kgio_tryread(CLIENT_HEADER_BUFFER_SIZE, buf)
    when :wait_readable
      return kato_set
    when String
      kato_delete
      @hp.add_parse(buf) and return queue!
    else
      return close
    end while true
    rescue => e
      handle_error(e)
  end

  def run
    process_pipeline(@hp.env, @hp)
  end

  def pipeline_ready(hp)
    # be fair to other clients, let others run first
    hp.parse and return queue!
    epoll_run("")
    false
  end
end
rainbows-5.0.0/lib/rainbows/configurator.rb0000644000004100000410000001620512641135250021014 0ustar  www-datawww-data# -*- encoding: binary -*-

# This module adds the \Rainbows! method to
# {Unicorn::Configurator}[http://unicorn.bogomips.org/Unicorn/Configurator.html].
# \Rainbows!-specific configuration options must be inside the Rainbows!
# block; other Unicorn::Configurator directives may be used anywhere
# in the file.
#
#   Rainbows! do
#     use :ThreadSpawn # concurrency model to use
#     worker_connections 400
#     keepalive_timeout 0 # zero disables keepalives entirely
#     client_max_body_size 5*1024*1024 # 5 megabytes
#     keepalive_requests 666 # default:100
#     client_header_buffer_size 2 * 1024 # 2 kilobytes
#   end
#
#   # the rest of the Unicorn configuration...
#   worker_processes 8
#   stderr_path "/path/to/error.log"
#   stdout_path "/path/to/output.log"
module Rainbows::Configurator
  Unicorn::Configurator::DEFAULTS.merge!({
    use: Rainbows::Base,
    worker_connections: 50,
    keepalive_timeout: 5,
    keepalive_requests: 100,
    client_max_body_size: 1024 * 1024,
    client_header_buffer_size: 1024,
    client_max_header_size: 112 * 1024,
    copy_stream: IO,
  })

  # Configures \Rainbows! with a given concurrency model to +use+ and
  # a +worker_connections+ upper-bound.  This method should be called
  # inside a Unicorn/\Rainbows! configuration file.
  #
  # All other methods in Rainbows::Configurator must be called
  # inside this block.
  def Rainbows!(&block)
    block_given? or raise ArgumentError, "Rainbows! requires a block"
    @block = true
    instance_eval(&block)
    ensure
      @block = false
  end

  def check! # :nodoc:
    @block or abort "must be inside a Rainbows! block"
  end

  # This limits the number of connected clients per-process.  The total
  # number of clients on a server is +worker_processes+ * +worker_connections+.
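  # For example (illustrative): 8 +worker_processes+ with 50
  # +worker_connections+ each allows up to 400 concurrent clients server-wide.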
  #
  # This option has no effect with the Base concurrency model, which is
  # limited to +1+.
  #
  # Default: 50
  def worker_connections(clients)
    check!
    set_int(:worker_connections, clients, 1)
  end

  # Select a concurrency model for use with \Rainbows!.  You must select
  # this with a Symbol (prefixed with ":").  Thus if you wish to select
  # the Rainbows::ThreadSpawn concurrency model, you would use:
  #
  #   Rainbows! do
  #     use :ThreadSpawn
  #   end
  #
  # See the {Summary}[link:Summary.html] document for a summary of
  # supported concurrency models.  +options+ may be specified for some
  # concurrency models, but the majority do not support them.
  #
  # Default: :Base (no concurrency)
  def use(model, *options)
    check!
    mod = begin
      Rainbows.const_get(model)
    rescue NameError => e
      warn "error loading #{model.inspect}: #{e}"
      e.backtrace.each { |l| warn l }
      abort "concurrency model #{model.inspect} not supported"
    end
    Module === mod or abort "concurrency model #{model.inspect} not supported"
    options.each do |opt|
      case opt
      when Hash
        Rainbows::O.merge!(opt)
      when Symbol
        Rainbows::O[opt] = true
      else
        abort "cannot handle option: #{opt.inspect} in #{options.inspect}"
      end
    end
    mod.setup if mod.respond_to?(:setup)
    set[:use] = mod
  end

  # Sets the value (in seconds) the server will wait for a client in
  # between requests.  The default value should be enough under most
  # conditions for browsers to render the page and start retrieving
  # extra elements.
  #
  # Setting this value to +0+ disables keepalive entirely
  #
  # Default: 5 seconds
  def keepalive_timeout(seconds)
    check!
    set_int(:keepalive_timeout, seconds, 0)
  end

  # This limits the number of requests which can be made over a keep-alive
  # connection.  This is used to prevent a single client from monopolizing
  # the server and to improve fairness when load-balancing across multiple
  # machines by forcing a client to reconnect.  This may be helpful
  # in mitigating some denial-of-service attacks.
  #
  # Default: 100 requests
  def keepalive_requests(count)
    check!
    case count
    when nil, Integer
      set[:keepalive_requests] = count
    else
      abort "not an integer or nil: keepalive_requests=#{count.inspect}"
    end
  end

  # Limits the maximum size of a request body for all requests.
  # Setting this to +nil+ disables the maximum size check.
  #
  # Default: 1 megabyte (1048576 bytes)
  #
  # If you want endpoint-specific upload limits and use a
  # "rack.input"-streaming concurrency model, see the Rainbows::MaxBody
  def client_max_body_size(bytes)
    check!
    err = "client_max_body_size must be nil or a non-negative Integer"
    case bytes
    when nil
    when Integer
      bytes >= 0 or abort err
    else
      abort err
    end
    set[:client_max_body_size] = bytes
  end

  # Limits the maximum size of a request header for all requests.
  #
  # Default: 112 kilobytes (114688 bytes)
  #
  # Lowering this will lower worst-case memory usage and mitigate some
  # denial-of-service attacks.  This should be larger than
  # client_header_buffer_size.
  def client_max_header_size(bytes)
    check!
    set_int(:client_max_header_size, bytes, 8)
  end

  # This governs the amount of memory allocated for an individual read(2) or
  # recv(2) system call when reading headers.  Applications that make minimal
  # use of cookies should not increase this from the default.
  #
  # Rails applications using session cookies may want to increase this to
  # 2048 bytes or more depending on expected request sizes.
  #
  # Increasing this will increase the overall memory usage of your application,
  # as you will need at least this amount of memory for every connected client.
  #
  # Default: 1024 bytes
  def client_header_buffer_size(bytes)
    check!
    set_int(:client_header_buffer_size, bytes, 1)
  end

  # Allows overriding the +klass+ where the +copy_stream+ method is
  # used to do efficient copying of regular files, pipes, and sockets.
  #
  # This is only used with multi-threaded concurrency models:
  #
  # * ThreadSpawn
  # * ThreadPool
  # * WriterThreadSpawn
  # * WriterThreadPool
  # * XEpollThreadSpawn
  # * XEpollThreadPool
  #
  # Due to existing {bugs}[http://redmine.ruby-lang.org/search?q=copy_stream]
  # in the Ruby IO.copy_stream implementation, \Rainbows! uses the
  # "sendfile" RubyGem that instead of copy_stream to transfer regular files
  # to clients.  The "sendfile" RubyGem also supports more operating systems,
  # and works with more concurrency models.
  #
  # Recent Linux 2.6 users may override this with "IO::Splice" from the
  # "io_splice" RubyGem:
  #
  #   require "io/splice"
  #   Rainbows! do
  #     copy_stream IO::Splice
  #   end
  #
  # Keep in mind that splice(2) itself is a relatively new system call
  # and has been buggy in many older Linux kernels.  If you're proxying
  # the output of sockets to the client, be sure to use "io_splice"
  # 4.1.1 or later to avoid stalling responses.
  #
  # Default: IO on Ruby 1.9+, false otherwise
  def copy_stream(klass)
    check!
    if klass && ! klass.respond_to?(:copy_stream)
      abort "#{klass} must respond to `copy_stream' or be `false'"
    end
    set[:copy_stream] = klass
  end
end

# :enddoc:
# inject the Rainbows! method into Unicorn::Configurator
Unicorn::Configurator.__send__(:include, Rainbows::Configurator)
rainbows-5.0.0/lib/rainbows/response.rb0000644000004100000410000001474512641135250020157 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::Response
  include Unicorn::HttpResponse
  Rainbows.config!(self, :copy_stream)

  # private file class for IO objects opened by Rainbows! itself (and not
  # the app or middleware)
  class F < File; end

  # called after forking
  def self.setup
    Kgio.accept_class = Rainbows::Client
    0 == Rainbows.server.keepalive_timeout and
      Rainbows::HttpParser.keepalive_requests = 0
  end

  def hijack_socket
    @hp.env['rack.hijack'].call
  end

  # returns the original body on success
  # returns nil if the headers hijacked the response body
  def write_headers(status, headers, alive, body)
    @hp.headers? or return body
    hijack = nil
    code = status.to_i
    msg = Rack::Utils::HTTP_STATUS_CODES[code]
    buf = "HTTP/1.1 #{msg ? %Q(#{code} #{msg}) : status}\r\n" \
          "Date: #{httpdate}\r\n"
    headers.each do |key, value|
      case key
      when %r{\A(?:Date|Connection)\z}i
        next
      when "rack.hijack"
        # this was an illegal key in Rack < 1.5, so it should be
        # OK to silently discard it for those older versions
        hijack = value
        alive = false # No persistent connections for hijacking
      else
        if /\n/ =~ value
          # avoiding blank, key-only cookies with /\n+/
          value.split(/\n+/).each { |v| buf << "#{key}: #{v}\r\n" }
        else
          buf << "#{key}: #{value}\r\n"
        end
      end
    end
    write(buf << (alive ? "Connection: keep-alive\r\n\r\n".freeze
                        : "Connection: close\r\n\r\n".freeze))

    if hijack
      body = nil # ensure caller does not close body
      hijack.call(hijack_socket)
    end
    body
  end

  def close_if_private(io)
    io.close if F === io
  end

  def io_for_fd(fd)
    Rainbows::FD_MAP.delete(fd) || F.for_fd(fd)
  end

  # to_io is not part of the Rack spec, but make an exception here
  # since we can conserve path lookups and file descriptors.
  # \Rainbows! will never get here without checking for the existence
  # of body.to_path first.
  def body_to_io(body)
    if body.respond_to?(:to_io)
      body.to_io
    else
      # try to take advantage of Rainbows::DevFdResponse, calling F.open
      # is a last resort
      path = body.to_path
      %r{\A/dev/fd/(\d+)\z} =~ path ? io_for_fd($1.to_i) : F.open(path)
    end
  end

  module Each
    # generic body writer, used for most dynamically-generated responses
    def write_body_each(body)
      body.each { |chunk| write(chunk) }
    end

    # generic response writer, used for most dynamically-generated responses
    # and also when copy_stream and/or IO#trysendfile is unavailable
    def write_response(status, headers, body, alive)
      body = write_headers(status, headers, alive, body)
      write_body_each(body) if body
      body
      ensure
        body.close if body.respond_to?(:close)
    end
  end
  include Each

  if IO.method_defined?(:trysendfile)
    module Sendfile
      def write_body_file(body, range)
        io = body_to_io(body)
        range ? sendfile(io, range[0], range[1]) : sendfile(io, 0)
        ensure
          close_if_private(io)
      end
    end
    include Sendfile
  end

  if COPY_STREAM
    unless IO.method_defined?(:trysendfile)
      module CopyStream
        def write_body_file(body, range)
          # ensure sendfile gets used for SyncClose objects:
          if !body.kind_of?(IO) && body.respond_to?(:to_path)
            body = body.to_path
          end

          range ? COPY_STREAM.copy_stream(body, self, range[1], range[0]) :
                  COPY_STREAM.copy_stream(body, self)
        end
      end
      include CopyStream
    end

    # write_body_stream is an alias for write_body_each if copy_stream
    # isn't used or available.
    def write_body_stream(body)
      COPY_STREAM.copy_stream(io = body_to_io(body), self)
      ensure
        close_if_private(io)
    end
  else # ! COPY_STREAM
    alias write_body_stream write_body_each
  end  # ! COPY_STREAM

  if IO.method_defined?(:trysendfile) || COPY_STREAM
    # This does not support multipart responses (does anybody actually
    # use those?)
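    #
    # Worked example (illustrative values): a request with
    # "Range: bytes=500-999" against a 10000-byte response yields
    # offset=500 and count=500, with "Content-Range: bytes 500-999/10000".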
    def sendfile_range(status, headers)
      status = status.to_i
      if 206 == status
        if %r{\Abytes (\d+)-(\d+)/\d+\z} =~ headers['Content-Range'.freeze]
          a, b = $1.to_i, $2.to_i
          return 206, headers, [ a,  b - a + 1 ]
        end
        return # wtf...
      end
      200 == status &&
      /\Abytes=(\d+-\d*|\d*-\d+)\z/ =~ @hp.env['HTTP_RANGE'] or
        return
      a, b = $1.split('-'.freeze)

      # HeaderHash is quite expensive, and Rack::File currently
      # uses a regular Ruby Hash with properly-cased headers the
      # same way they're presented in rfc2616.
      headers = Rack::Utils::HeaderHash.new(headers) unless Hash === headers
      clen = headers['Content-Length'.freeze] or return
      size = clen.to_i

      if b.nil? # bytes=M-
        offset = a.to_i
        count = size - offset
      elsif a.empty? # bytes=-N
        offset = size - b.to_i
        count = size - offset
      else  # bytes=M-N
        offset = a.to_i
        count = b.to_i + 1 - offset
      end

      if 0 > count || offset >= size
        headers['Content-Length'.freeze] = "0"
        headers['Content-Range'.freeze] = "bytes */#{clen}"
        return 416, headers, nil
      else
        count = size if count > size
        headers['Content-Length'.freeze] = count.to_s
        headers['Content-Range'.freeze] =
                                    "bytes #{offset}-#{offset+count-1}/#{clen}"
        return 206, headers, [ offset, count ]
      end
    end

    def write_response_path(status, headers, body, alive)
      if File.file?(body.to_path)
        if r = sendfile_range(status, headers)
          status, headers, range = r
          body = write_headers(status, headers, alive, body)
          write_body_file(body, range) if body && range
        else
          body = write_headers(status, headers, alive, body)
          write_body_file(body, nil) if body
        end
      else
        body = write_headers(status, headers, alive, body)
        write_body_stream(body) if body
      end
      body
      ensure
        body.close if body.respond_to?(:close)
    end

    module ToPath
      # returns nil if hijacked
      def write_response(status, headers, body, alive)
        if body.respond_to?(:to_path)
          write_response_path(status, headers, body, alive)
        else
          super
        end
      end
    end
    include ToPath
  end # COPY_STREAM || IO.method_defined?(:trysendfile)
end
rainbows-5.0.0/lib/rainbows/reverse_proxy/0000755000004100000410000000000012641135250020675 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/reverse_proxy/event_machine.rb0000644000004100000410000000240012641135250024023 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# TODO: handle large responses without having it all in memory
module Rainbows::ReverseProxy::EventMachine
  class Backend < EM::Connection
    include Rainbows::ReverseProxy::EvClient # provides receive_data

    # +env+ is the Rack environment; the upstream address (a packed
    # sockaddr) is resolved in #call, so it can be either a UNIX or TCP socket
    def initialize(env)
      @env = env
      @rbuf = ""
      @parser = Kcar::Parser.new
      @response = @body = nil
      @headers = Rack::Utils::HeaderHash.new
    end

    # prevents us from sending too much at once and OOM-ing on large uploads
    def stream_input(input)
      if buf = input.read(16384)
        send_data buf
        EM.next_tick { stream_input(input) }
      end
    end

    def on_write_complete
      if @input
        buf = @input.read(16384, @junk) and return write(buf)
        @input = nil
      end
    end

    def unbind
      @env['async.callback'].call(@response || Rainbows::ReverseProxy::E502)
    end
  end

  UpstreamSocket = Rainbows::ReverseProxy::UpstreamSocket
  def call(env)
    input = prepare_input!(env)
    io = UpstreamSocket.start(pick_upstream(env))
    sock = EM.attach(io, Backend, env)
    sock.send_data(build_headers(env, input))
    sock.stream_input(input) if input
    throw :async
  end
end
rainbows-5.0.0/lib/rainbows/reverse_proxy/ev_client.rb0000644000004100000410000000147712641135250023203 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
require 'tempfile'
module Rainbows::ReverseProxy::EvClient
  include Rainbows::ReverseProxy::Synchronous
  CBB = Unicorn::TeeInput.client_body_buffer_size

  def receive_data(buf)
    if @body
      @body << buf
    else
      response = @parser.headers(@headers, @rbuf << buf) or return
      if ((cl = @headers['Content-Length'.freeze]) && cl.to_i > CBB) ||
         (%r{\bchunked\b} =~ @headers['Transfer-Encoding'.freeze])
        @body = LargeBody.new("")
        @body << @rbuf
        @response = response << @body
      else
        @body = @rbuf.dup
        @response = response << [ @body ]
      end
    end
  end

  class LargeBody < Tempfile
    def each
      buf = ""
      rewind
      while read(16384, buf)
        yield buf
      end
    end

    alias close close!
  end
end
rainbows-5.0.0/lib/rainbows/reverse_proxy/coolio.rb0000644000004100000410000000304412641135250022507 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# TODO: handle large responses without having it all in memory
module Rainbows::ReverseProxy::Coolio
  LOOP = Cool.io::Loop.default

  class Backend < Cool.io::IO
    include Rainbows::ReverseProxy::EvClient

    def initialize(env, addr, input)
      @env = env
      @input = input
      @junk, @rbuf = "", ""
      @parser = Kcar::Parser.new
      @response = @body = nil
      @headers = Rack::Utils::HeaderHash.new
      super(UpstreamSocket.start(addr)) # kgio-enabled socket
    end

    def on_write_complete
      if @input
        buf = @input.read(16384, @junk) and return write(buf)
        @input = nil
      end
    end

    def on_readable
      # avoiding IO#read_nonblock since that's expensive in 1.9.2
      case buf = @_io.kgio_tryread(16384, @junk)
      when String
        receive_data(buf)
      when :wait_readable
        return
      when nil
        @env['async.callback'].call(@response)
        return close
      end while true # we always read until EAGAIN or EOF

      rescue => e
        case e
        when Errno::ECONNRESET
          @env['async.callback'].call(@response)
          return close
        when SystemCallError
        else
          Unicorn.log_error(@env["rack.logger"], "on_readable", e)
        end
        @env['async.callback'].call(Rainbows::ReverseProxy::E502)
        close
    end
  end

  def call(env)
    input = prepare_input!(env)
    sock = Backend.new(env, pick_upstream(env), input).attach(LOOP)
    sock.write(build_headers(env, input))
    throw :async
  end
end
rainbows-5.0.0/lib/rainbows/reverse_proxy/synchronous.rb0000644000004100000410000000076512641135250023624 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::ReverseProxy::Synchronous
  UpstreamSocket = Rainbows::ReverseProxy::UpstreamSocket

  def each_block(input)
    buf = ""
    while input.read(16384, buf)
      yield buf
    end
  end

  def call(env)
    input = prepare_input!(env)
    req = build_headers(env, input)
    sock = UpstreamSocket.new(pick_upstream(env))
    sock.write(req)
    each_block(input) { |buf| sock.kgio_write(buf) } if input
    Kcar::Response.new(sock).rack
  end
end
rainbows-5.0.0/lib/rainbows/reverse_proxy/multi_thread.rb0000644000004100000410000000022112641135250023676 0ustar  www-datawww-data# -*- encoding -*-
# :enddoc:
module Rainbows::ReverseProxy::MultiThread
  def pick_upstream(env)
    @lock.synchronize { super(env) }
  end
end
rainbows-5.0.0/lib/rainbows/coolio_thread_pool/0000755000004100000410000000000012641135250021625 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/coolio_thread_pool/watcher.rb0000644000004100000410000000043012641135250023604 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::CoolioThreadPool::Watcher < Coolio::TimerWatcher
  def initialize(threads)
    @threads = threads
    super(Rainbows.server.timeout, true)
  end

  def on_timer
    @threads.each { |t| t.join(0) and Rainbows.quit! }
  end
end
rainbows-5.0.0/lib/rainbows/coolio_thread_pool/client.rb0000644000004100000410000000031512641135250023427 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::CoolioThreadPool::Client < Rainbows::Coolio::ThreadClient
  # QUEUE constant will be set in worker_loop
  def app_dispatch
    QUEUE << self
  end
end
rainbows-5.0.0/lib/rainbows/revactor.rb0000644000004100000410000000630612641135250020140 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'revactor'
Revactor::VERSION >= '0.1.5' or abort 'revactor 0.1.5 is required'

# Enables use of the Actor model through {Revactor}[http://revactor.org]
# under Ruby 1.9.
#
# \Revactor is dormant upstream, so the use of this is NOT recommended for
# new applications.
#
# It spawns one long-lived Actor for every listen socket in the process
# and spawns a new Actor for every client connection accept()-ed.
# +worker_connections+ will limit the number of client Actors we have
# running at any one time.
#
# Applications using this model are required to be reentrant, but do
# not have to worry about race conditions unless they use threads
# internally.  \Rainbows! does not spawn threads under this model.
# Multiple instances of the same app may run in the same address space
# sequentially (but at interleaved points).  Any network dependencies
# in the application using this model should be implemented using the
# \Revactor library as well, to take advantage of the networking
# concurrency features this model provides.
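#
# === Example configuration
#
# A minimal sketch; the connection count is illustrative only:
#
#   Rainbows! do
#     use :Revactor
#     worker_connections 128
#   end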
#
# === RubyGem Requirements
# * revactor 0.1.5 or later
module Rainbows::Revactor
  autoload :Client, 'rainbows/revactor/client'
  autoload :Proxy, 'rainbows/revactor/proxy'

  include Rainbows::Base

  # runs inside each forked worker, this sits around and waits
  # for connections and doesn't die until the parent dies (or is
  # given an INT, QUIT, or TERM signal)
  def worker_loop(worker) #:nodoc:
    Client.setup
    init_worker_process(worker)
    nr = 0
    limit = worker_connections
    actor_exit = Case[:exit, Actor, Object]

    revactorize_listeners.each do |l,close,accept|
      Actor.spawn do
        Actor.current.trap_exit = true
        l.controller = l.instance_variable_set(:@receiver, Actor.current)
        begin
          while nr >= limit
            l.disable if l.enabled?
            logger.info "busy: clients=#{nr} >= limit=#{limit}"
            Actor.receive do |f|
              f.when(close) {}
              f.when(actor_exit) { nr -= 1 }
              f.after(0.01) {} # another listener could've gotten an exit
            end
          end

          l.enable unless l.enabled?
          Actor.receive do |f|
            f.when(close) {}
            f.when(actor_exit) { nr -= 1 }
            f.when(accept) do |_, _, s|
              nr += 1
              Actor.spawn_link(s) { |c| Client.new(c).process_loop }
            end
          end
        rescue => e
          Rainbows::Error.listen_loop(e)
        end while Rainbows.alive
        Actor.receive do |f|
          f.when(close) {}
          f.when(actor_exit) { nr -= 1 }
        end while nr > 0
      end
    end

    Actor.sleep 1 while Rainbows.tick || nr > 0
    rescue Errno::EMFILE
      # ignore, let another worker process take it
  end

  def revactorize_listeners #:nodoc:
    LISTENERS.map do |s|
      case s
      when TCPServer
        l = Revactor::TCP.listen(s, nil)
        [ l, T[:tcp_closed, Revactor::TCP::Socket],
          T[:tcp_connection, l, Revactor::TCP::Socket] ]
      when UNIXServer
        l = Revactor::UNIX.listen(s)
        [ l, T[:unix_closed, Revactor::UNIX::Socket ],
          T[:unix_connection, l, Revactor::UNIX::Socket] ]
      end
    end
  end
  # :startdoc:
end
rainbows-5.0.0/lib/rainbows/coolio_thread_spawn.rb0000644000004100000410000000220712641135250022332 0ustar  www-datawww-data# -*- encoding: binary -*-
# A combination of the Coolio and ThreadSpawn models.  This allows Ruby
# Thread-based concurrency for application processing.  It DOES NOT
# expose a streamable "rack.input" for upload processing within the
# app.  DevFdResponse should be used with this class to proxy
# asynchronous responses.  All network I/O between the client and
# server are handled by the main thread and outside of the core
# application dispatch.
#
# Unlike ThreadSpawn, Cool.io makes this model highly suitable for
# slow clients and applications with medium-to-slow response times
# (I/O bound), but less suitable for sleepy applications.
#
# This concurrency model is designed for Ruby 1.9, and Ruby 1.8
# users are NOT advised to use this due to high CPU usage.
#
# === RubyGem Requirements
# * cool.io 1.0.0 or later
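#
# === Example
#
# A minimal configuration sketch for selecting this model in the
# \Rainbows!/Unicorn config file; the +worker_connections+ value below
# is purely illustrative, not a recommendation:
#
#   Rainbows! do
#     use :CoolioThreadSpawn
#     worker_connections 400
#   end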
module Rainbows::CoolioThreadSpawn
  include Rainbows::Coolio::Core
  autoload :Client, 'rainbows/coolio_thread_spawn/client'

  def init_worker_process(worker) # :nodoc:
    super
    master = Rainbows::Coolio::Master.new(Queue.new)
    master.attach(Coolio::Loop.default)
    Client.const_set(:MASTER, master)
  end
end
# :enddoc:
rainbows-5.0.0/lib/rainbows/rev_thread_pool.rb0000644000004100000410000000167112641135250021467 0ustar  www-datawww-data# -*- encoding: binary -*-
# :stopdoc:
Rainbows.const_set(:RevThreadPool, Rainbows::CoolioThreadPool)
# :startdoc:

# CoolioThreadPool is the new version of this, use that instead.
#
# A combination of the Rev and ThreadPool models.  This allows Ruby
# Thread-based concurrency for application processing.  It DOES NOT
# expose a streamable "rack.input" for upload processing within the
# app.  DevFdResponse should be used with this class to proxy
# asynchronous responses.  All network I/O between the client and
# server are handled by the main thread and outside of the core
# application dispatch.
#
# Unlike ThreadPool, Rev makes this model highly suitable for
# slow clients and applications with medium-to-slow response times
# (I/O bound), but less suitable for sleepy applications.
#
# This concurrency model is designed for Ruby 1.9, and Ruby 1.8
# users are NOT advised to use this due to high CPU usage.
module Rainbows::RevThreadPool; end
rainbows-5.0.0/lib/rainbows/xepoll_thread_spawn.rb0000644000004100000410000000344412641135250022355 0ustar  www-datawww-data# -*- encoding: binary -*-
require "thread"
require "sleepy_penguin"
require "raindrops"

# This is an edge-triggered epoll concurrency model with blocking
# accept() in a (hopefully) native thread.  This is comparable to
# ThreadSpawn and CoolioThreadSpawn, but is Linux-only and able to exploit
# "wake one" accept() behavior of a blocking accept() call when used
# with native threads.
#
# This supports streaming "rack.input" and allows +:pool_size+ tuning
# independently of +worker_connections+
#
# === Disadvantages
#
# This is only supported under Linux 2.6 and later kernels.
#
# === Compared to CoolioThreadSpawn
#
# This does not buffer outgoing responses in userspace at all, meaning
# it can lower response latency to fast clients and also prevent
# starvation of other clients when reading slow disks for responses
# (when combined with native threads).
#
# CoolioThreadSpawn is likely better for trickling large static files or
# proxying responses to slow clients, but this is likely better for fast
# clients.
#
# Unlike CoolioThreadSpawn, this supports streaming "rack.input", which
# is useful for reading large uploads from fast clients.
#
# === Compared to ThreadSpawn
#
# This can maintain idle connections without the memory overhead of an
# idle Thread.  The cost of handling/dispatching active connections is
# exactly the same for an equivalent number of active connections.
#
# === RubyGem Requirements
#
# * raindrops 0.6.0 or later
# * sleepy_penguin 3.0.1 or later
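#
# === Example
#
# A configuration sketch (values are illustrative only); a generous
# +keepalive_timeout+ is shown because this model can hold idle
# connections cheaply, without an idle Thread per connection:
#
#   Rainbows! do
#     use :XEpollThreadSpawn
#     worker_connections 1024
#     keepalive_timeout 10
#   end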
module Rainbows::XEpollThreadSpawn
  # :stopdoc:
  include Rainbows::Base

  def init_worker_process(worker)
    super
    require "rainbows/xepoll_thread_spawn/client"
    Rainbows::Client.__send__ :include, Client
  end

  def worker_loop(worker) # :nodoc:
    init_worker_process(worker)
    Client.loop
  end
  # :startdoc:
end
rainbows-5.0.0/lib/rainbows/app_pool.rb0000644000004100000410000000732012641135250020121 0ustar  www-datawww-data# -*- encoding: binary -*-

require 'thread'

# Rack middleware to limit application-level concurrency independently
# of network concurrency in \Rainbows!   Since the +worker_connections+
# option in \Rainbows! is only intended to limit the number of
# simultaneous clients, this middleware may be used to limit the
# number of concurrent application dispatches independently of
# concurrent clients.
#
# Instead of using M:N concurrency in \Rainbows!, this middleware
# allows M:N:P concurrency where +P+ is the AppPool +:size+ while
# +M+ remains the number of +worker_processes+ and +N+ remains the
# number of +worker_connections+.
#
#   rainbows master
#    \_ rainbows worker[0]
#    |  \_ client[0,0]------\      ___app[0]
#    |  \_ client[0,1]-------\    /___app[1]
#    |  \_ client[0,2]-------->--<       ...
#    |  ...                __/    `---app[P]
#    |  \_ client[0,N]----/
#    \_ rainbows worker[1]
#    |  \_ client[1,0]------\      ___app[0]
#    |  \_ client[1,1]-------\    /___app[1]
#    |  \_ client[1,2]-------->--<       ...
#    |  ...                __/    `---app[P]
#    |  \_ client[1,N]----/
#    \_ rainbows worker[M]
#       \_ client[M,0]------\      ___app[0]
#       \_ client[M,1]-------\    /___app[1]
#       \_ client[M,2]-------->--<       ...
#       ...                __/    `---app[P]
#       \_ client[M,N]----/
#
# AppPool should be used if you want to enforce a lower value of +P+
# than +N+.
#
# AppPool has no effect on the Rev or EventMachine concurrency models
# as those are single-threaded/single-instance as far as application
# concurrency goes.  In other words, +P+ is always +one+ when using
# Rev or EventMachine.  As of \Rainbows! 0.7.0, it is safe to use with
# Revactor and the new FiberSpawn and FiberPool concurrency models.
#
# Since this is Rack middleware, you may load this in your Rack
# config.ru file and even use it in threaded servers other than
# \Rainbows!
#
#   use Rainbows::AppPool, :size => 30
#   map "/lobster" do
#     run Rack::Lobster.new
#   end
#
# You may want to load this earlier or later in your middleware chain
# depending on the concurrency/copy-friendliness of your middleware(s).
class Rainbows::AppPool < Struct.new(:pool, :re)

  # +opt+ is a hash, +:size+ is the size of the pool (default: 6)
  # meaning you can have up to 6 concurrent instances of +app+
  # within one \Rainbows! worker process.  We support various
  # methods of the +:copy+ option: +dup+, +clone+, +deep+ and +none+.
  # Depending on your +app+, one of these options should be set.
  # The default +:copy+ is +:dup+ as is commonly seen in existing
  # Rack middleware.
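  #
  # For example (a sketch only; whether +:deep+ copying is safe or
  # useful depends entirely on your application):
  #
  #   use Rainbows::AppPool, :size => 30, :copy => :deep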
  def initialize(app, opt = {})
    self.pool = Queue.new
    (1...(opt[:size] || 6)).each do
      pool << case (opt[:copy] || :dup)
      when :none then app
      when :dup then app.dup
      when :clone then app.clone
      when :deep then Marshal.load(Marshal.dump(app)) # unlikely...
      else
        raise ArgumentError, "unsupported copy method: #{opt[:copy].inspect}"
      end
    end
    pool << app # the original
  end

  # Rack application endpoint, +env+ is the Rack environment
  def call(env) # :nodoc:

    # we have to do this check at call time (and not in initialize)
    # because of preload_app=true and models being changeable with SIGHUP.
    # Fortunately this is safe for all the reentrant (but not multithreaded)
    # classes that depend on it, and a safe no-op for multithreaded
    # concurrency models
    self.re ||= begin
      case env["rainbows.model"]
      when :FiberSpawn, :FiberPool, :Revactor, :NeverBlock,
           :RevFiberSpawn, :CoolioFiberSpawn
        self.pool = Rainbows::Fiber::Queue.new(pool)
      end
      true
    end

    app = pool.shift
    app.call(env)
    ensure
      pool << app
  end
end
rainbows-5.0.0/lib/rainbows/socket_proxy.rb0000644000004100000410000000102612641135250021036 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
#
module Rainbows::SocketProxy
  def kgio_addr
    to_io.kgio_addr
  end

  def kgio_read(size, buf = "")
    to_io.kgio_read(size, buf)
  end

  def kgio_read!(size, buf = "")
    to_io.kgio_read!(size, buf)
  end

  def kgio_trywrite(buf)
    to_io.kgio_trywrite(buf)
  end

  def kgio_tryread(size, buf = "")
    to_io.kgio_tryread(size, buf)
  end

  def kgio_wait_readable(timeout = nil)
    to_io.kgio_wait_readable(timeout)
  end

  def timed_read(buf)
    to_io.timed_read(buf)
  end
end
rainbows-5.0.0/lib/rainbows/reverse_proxy.rb0000644000004100000410000001340612641135250021226 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
require 'socket'
require 'thread'
require 'uri'
require 'kcar' # http://bogomips.org/kcar/ -- gem install kcar

# This is lightly tested and has an unstable configuration interface.
# ***** Do not rely on anything under the ReverseProxy namespace! *****
#
# A reverse proxy implementation for \Rainbows!  It is a Rack application,
# compatible with and optimized for most \Rainbows! concurrency models.
#
# It makes HTTP/1.0 connections without keepalive to backends, so
# it is only recommended for proxying to upstreams on the same LAN
# or machine.  It can proxy to TCP hosts as well as UNIX domain sockets.
#
# Currently it only does simple round-robin balancing and does not
# know how to retry connections to failed backends.
#
# Buffering-behavior is currently dependent on the concurrency model selected:
#
# Fully-buffered (uploads and response bodies):
#    Coolio, EventMachine, NeverBlock, CoolioThreadSpawn, CoolioThreadPool
# If you're proxying to Unicorn, fully-buffered is the way to go.
#
# Buffered input only (uploads, but not response bodies):
#    ThreadSpawn, ThreadPool, FiberSpawn, FiberPool, CoolioFiberSpawn
#
# It is not recommended to use Base, WriterThreadSpawn or WriterThreadPool
# to host this application.  However, you may proxy to a backend running
# one of these concurrency models with a fully-buffering concurrency model.
#
# See the {example config}[link:examples/reverse_proxy.ru] for a sample
# configuration
#
# TODO: Revactor support
# TODO: Support HTTP trailers
# TODO: optional streaming input for synchronous
# TODO: error handling
#
# WARNING! this is only lightly tested and has no automated tests, yet!
class Rainbows::ReverseProxy
  autoload :MultiThread, 'rainbows/reverse_proxy/multi_thread'
  autoload :Synchronous, 'rainbows/reverse_proxy/synchronous'
  autoload :Coolio, 'rainbows/reverse_proxy/coolio'
  autoload :EventMachine, 'rainbows/reverse_proxy/event_machine'
  autoload :EvClient, 'rainbows/reverse_proxy/ev_client'

  E502 = [ 502, [ %w(Content-Length 0), %w(Content-Type text/plain) ], [] ]

  def initialize(opts)
    @lock = Mutex.new
    upstreams = opts[:upstreams]
    @upstreams = []
    upstreams.each do |url|
      url, cfg = *url if Array === url
      if url =~ %r{\Ahttp://}
        uri = URI.parse(url)
        host = uri.host =~ %r{\A\[([a-fA-F0-9:]+)\]\z} ? $1 : uri.host
        sockaddr = Socket.sockaddr_in(uri.port, host)
      else
        path = url.gsub(%r{\Aunix:}, "") # nginx compat
        %r{\A~} =~ path and path = File.expand_path(path)
        sockaddr = Socket.sockaddr_un(path)
      end
      ((cfg && cfg[:weight]) || 1).times { @upstreams << sockaddr }
    end
    @nr = 0
  end

  # detects the concurrency model at first run and replaces itself
  def call(env)
    if @lock.try_lock
      case model = env["rainbows.model"]
      when :EventMachine, :NeverBlock
        extend(EventMachine)
      when :Coolio, :CoolioThreadPool, :CoolioThreadSpawn
        extend(Coolio)
      when :RevFiberSpawn, :Rev, :RevThreadPool, :RevThreadSpawn
        warn "#{model} is not *well* supported with #{self.class}"
        warn "Switch to #{model.to_s.gsub(/Rev/, 'Coolio')}!"
        extend(Synchronous)
      when :Revactor
        warn "Revactor is not *well* supported with #{self.class} yet"
        extend(Synchronous)
      when :FiberSpawn, :FiberPool, :CoolioFiberSpawn
        extend(Synchronous)
        Synchronous::UpstreamSocket.
          __send__(:include, Rainbows::Fiber::IO::Methods)
      when :WriterThreadSpawn, :WriterThreadPool
        warn "#{model} is not recommended for use with #{self.class}"
        extend(Synchronous)
      else
        extend(Synchronous)
      end
      extend(MultiThread) if env["rack.multithread"]
      @lock.unlock
    else
      @lock.synchronize {} # wait for the first locker to finish
    end
    call(env)
  end

  # returns request headers for sending to the upstream as a string
  def build_headers(env, input)
    remote_addr = env['REMOTE_ADDR']
    xff = env['HTTP_X_FORWARDED_FOR']
    xff = xff ? "#{xff},#{remote_addr}" : remote_addr
    req = "#{env['REQUEST_METHOD']} #{env['REQUEST_URI']} HTTP/1.0\r\n" \
          "Connection: close\r\n" \
          "X-Forwarded-For: #{xff}\r\n"
    env.each do |key, value|
      %r{\AHTTP_(\w+)\z} =~ key or next
      key = $1
      next if %r{\A(?:VERSION|CONNECTION|KEEP_ALIVE|X_FORWARDED_FOR)\z}x =~ key
      key.tr!('_'.freeze, '-'.freeze)
      req << "#{key}: #{value}\r\n"
    end
    input and req << (input.respond_to?(:size) ?
                     "Content-Length: #{input.size}\r\n" :
                     "Transfer-Encoding: chunked\r\n".freeze)
    req << "\r\n".freeze
  end

  def pick_upstream(env) # +env+ is reserved for future expansion
    @nr += 1
    @upstreams[@nr %= @upstreams.size]
  end

  def prepare_input!(env)
    if cl = env['CONTENT_LENGTH']
      size = cl.to_i
      size > 0 or return
    elsif %r{\Achunked\z}i =~ env.delete('HTTP_TRANSFER_ENCODING')
      # do people use multiple transfer-encodings?
    else
      return
    end

    input = env['rack.input']
    if input.respond_to?(:rewind)
      if input.respond_to?(:size)
        input.size # TeeInput-specific behavior
        return input
      else
        return SizedInput.new(input, size)
      end
    end
    tmp = size && size < 0x4000 ? StringIO.new("") : Unicorn::TmpIO.new
    each_block(input) { |x| tmp.syswrite(x) }
    tmp.rewind
    tmp
  end

  class SizedInput
    attr_reader :size

    def initialize(input, n)
      buf = ""
      if n == nil
        n = 0
        while input.read(16384, buf)
          n += buf.size
        end
        input.rewind
      end
      @input, @size = input, n
    end

    def read(*args)
      @input.read(*args)
    end
  end

  class UpstreamSocket < Kgio::Socket
    alias readpartial kgio_read!
  end
end
rainbows-5.0.0/lib/rainbows/epoll/0000755000004100000410000000000012641135250017074 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/epoll/response_chunk_pipe.rb0000644000004100000410000000045612641135250023471 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
#
class Rainbows::Epoll::ResponseChunkPipe < Rainbows::Epoll::ResponsePipe
  def tryread
    @io or return

    case rv = super
    when String
      "#{rv.size.to_s(16)}\r\n#{rv}\r\n"
    when nil
      close
      "0\r\n\r\n"
    else
      rv
    end
  end
end
rainbows-5.0.0/lib/rainbows/epoll/client.rb0000644000004100000410000001542412641135250020705 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:

module Rainbows::Epoll::Client

  include Rainbows::EvCore
  APP = Rainbows.server.app
  Server = Rainbows::Epoll::Server
  IN = SleepyPenguin::Epoll::IN | SleepyPenguin::Epoll::ONESHOT
  OUT = SleepyPenguin::Epoll::OUT | SleepyPenguin::Epoll::ONESHOT
  EPINOUT = IN | OUT
  KATO = {}.compare_by_identity
  Rainbows.at_quit { KATO.each_key(&:timeout!).clear }
  Rainbows.config!(self, :keepalive_timeout)
  EP = Rainbows::EP
  @@last_expire = Rainbows.now

  def self.expire
    return if ((now = Rainbows.now) - @@last_expire) < 1.0
    if (ot = KEEPALIVE_TIMEOUT) >= 0
      ot = now - ot
      KATO.delete_if { |client, time| time < ot and client.timeout! }
    end
    @@last_expire = now
  end

  def self.loop
    begin
      EP.wait(nil, 1000) { |_, obj| obj.epoll_run }
      expire
    rescue Errno::EINTR
    rescue => e
      Rainbows::Error.listen_loop(e)
    end while Rainbows.tick || Server.nr > 0
  end

  # only call this once
  def epoll_once
    @wr_queue = [] # may contain String, ResponsePipe, and StreamFile objects
    post_init
    on_readable
    rescue => e
      handle_error(e)
  end

  def on_readable
    case rv = kgio_tryread(CLIENT_HEADER_BUFFER_SIZE, RBUF)
    when String
      on_read(rv)
      return if @wr_queue[0] || closed?
      return hijacked if @hp.hijacked?
    when :wait_readable
      KATO[self] = @@last_expire if :headers == @state
      return EP.set(self, IN)
    else
      break
    end until :close == @state
    close unless closed?
    rescue Errno::ECONNRESET
      close
    rescue IOError
  end

  def app_call input # called by on_read()
    @env['rack.input'] = input
    @env['REMOTE_ADDR'] = kgio_addr
    @hp.hijack_setup(self)
    status, headers, body = APP.call(@env.merge!(RACK_DEFAULTS))
    return hijacked if @hp.hijacked?
    ev_write_response(status, headers, body, @hp.next?)
  end

  def write_response_path(status, headers, body, alive)
    io = body_to_io(body)
    st = io.stat

    if st.file?
      defer_file(status, headers, body, alive, io, st)
    elsif st.socket? || st.pipe?
      chunk = stream_response_headers(status, headers, alive, body)
      return hijacked if nil == chunk
      stream_response_body(body, io, chunk)
    else
      # char or block device... WTF?
      write_response(status, headers, body, alive)
    end
  end

  # used for streaming sockets and pipes
  def stream_response_body(body, io, chunk)
    pipe = (chunk ? Rainbows::Epoll::ResponseChunkPipe :
                    Rainbows::Epoll::ResponsePipe).new(io, self, body)
    return @wr_queue << pipe if @wr_queue[0]
    stream_pipe(pipe) or return
    @wr_queue[0] or @wr_queue << ''.freeze
  end

  def ev_write_response(status, headers, body, alive)
    @state = alive ? :headers : :close
    if body.respond_to?(:to_path)
      write_response_path(status, headers, body, alive)
    else
      write_response(status, headers, body, alive)
    end
    return hijacked if @hp.hijacked?
    # try to read more if we didn't have to buffer writes
    next_request if alive && 0 == @wr_queue.size
  end

  def hijacked
    KATO.delete(self)
    Server.decr # no other place to do this
    EP.delete(self)
    nil
  end

  def next_request
    if 0 == @buf.size
      want_more
    else
      # pipelined request (already in buffer)
      on_read(''.freeze)
      return if @wr_queue[0] || closed?
      return hijacked if @hp.hijacked?
      close if :close == @state
    end
  end

  def epoll_run
    if @wr_queue[0]
      on_writable
    else
      KATO.delete self
      on_readable
    end
  end

  def want_more
    EP.set(self, EPINOUT)
  end

  def on_deferred_write_complete
    :close == @state and return close
    next_request
  end

  def handle_error(e)
    msg = Rainbows::Error.response(e) and kgio_trywrite(msg) rescue nil
    ensure
      close
  end

  def write_deferred(obj)
    Rainbows::StreamFile === obj ? stream_file(obj) : stream_pipe(obj)
  end

  # writes until our write buffer is empty or we block
  # returns true if we're done writing everything
  def on_writable
    obj = @wr_queue.shift

    case rv = String === obj ? kgio_trywrite(obj) : write_deferred(obj)
    when nil
      obj = @wr_queue.shift or return on_deferred_write_complete
    when String
      obj = rv # retry
    when :wait_writable # Strings and StreamFiles only
      @wr_queue.unshift(obj)
      EP.set(self, OUT)
      return
    when :deferred
      return
    end while true
    rescue => e
      handle_error(e)
  end

  def write(buf)
    unless @wr_queue[0]
      case rv = kgio_trywrite(buf)
      when nil
        return # all written
      when String
        buf = rv # retry
      when :wait_writable
        @wr_queue << buf.dup # >3-word 1.9 strings are copy-on-write
        return EP.set(self, OUT)
      end while true
    end
    @wr_queue << buf.dup # >3-word 1.9 strings are copy-on-write
  end

  def close
    @wr_queue.each { |x| x.respond_to?(:close) and x.close rescue nil }
    super
    on_close
  end

  def on_close
    KATO.delete(self)
    Server.decr
  end

  def timeout!
    shutdown
    true
  end

  # Rack apps should not hijack here, but they may...
  def defer_file(status, headers, body, alive, io, st)
    if r = sendfile_range(status, headers)
      status, headers, range = r
      write_headers(status, headers, alive, body) or return hijacked
      range and defer_file_stream(range[0], range[1], io, body)
    else
      write_headers(status, headers, alive, body) or return hijacked
      defer_file_stream(0, st.size, io, body)
    end
  end

  # returns +nil+ on EOF, :wait_writable if the client blocks
  def stream_file(sf) # +sf+ is a Rainbows::StreamFile object
    case n = trysendfile(sf, sf.offset, sf.count)
    when Integer
      sf.offset += n
      0 == (sf.count -= n) and return sf.close
    else
      return n # :wait_writable or nil
    end while true
    rescue
      sf.close
      raise
  end

  def defer_file_stream(offset, count, io, body)
    sf = Rainbows::StreamFile.new(offset, count, io, body)
    unless @wr_queue[0]
      stream_file(sf) or return
    end
    @wr_queue << sf
    EP.set(self, OUT)
  end

  # this alternates between a push and pull model from the pipe -> client
  # to avoid having too much data in userspace on either end.
  def stream_pipe(pipe)
    case buf = pipe.tryread
    when String
      write(buf)
      if @wr_queue[0]
        # client is blocked on write, client will pull from pipe later
        EP.delete pipe
        @wr_queue << pipe
        EP.set(self, OUT)
        return :deferred
      end
      # continue looping...
    when :wait_readable
      # pipe blocked on read, let the pipe push to the client in the future
      EP.delete self
      EP.set(pipe, IN)
      return :deferred
    else # nil => EOF
      return pipe.close # nil
    end while true
    rescue
      pipe.close
      raise
  end
end
rainbows-5.0.0/lib/rainbows/epoll/response_pipe.rb0000644000004100000410000000115112641135250022272 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
#
class Rainbows::Epoll::ResponsePipe
  attr_reader :io
  alias to_io io
  RBUF = Rainbows::EvCore::RBUF
  EP = Rainbows::EP

  def initialize(io, client, body)
    @io, @client, @body = io, client, body
  end

  def epoll_run
    return close if @client.closed?
    @client.stream_pipe(self) or @client.on_deferred_write_complete
    rescue => e
      close
      @client.handle_error(e)
  end

  def close
    @io or return
    EP.delete self
    @body.respond_to?(:close) and @body.close
    @io = @body = nil
  end

  def tryread
    Kgio.tryread(@io, 16384, RBUF)
  end
end
rainbows-5.0.0/lib/rainbows/epoll/server.rb0000644000004100000410000000136612641135250020735 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::Epoll::Server
  @@nr = 0
  IN = SleepyPenguin::Epoll::IN | SleepyPenguin::Epoll::ET
  MAX = Rainbows.server.worker_connections
  THRESH = MAX - 1
  LISTENERS = Rainbows::HttpServer::LISTENERS
  EP = Rainbows::EP

  def self.nr
    @@nr
  end

  # rearms all listeners when there's a free slot
  def self.decr
    THRESH == (@@nr -= 1) and LISTENERS.each { |sock| EP.set(sock, IN) }
  end

  def self.extended(sock)
    EP.set(sock, IN)
  end

  def epoll_run
    return EP.delete(self) if @@nr >= MAX
    while io = kgio_tryaccept
      @@nr += 1
      # there's a chance the client never even sees epoll for simple apps
      io.epoll_once
      return EP.delete(self) if @@nr >= MAX
    end
  end
end
rainbows-5.0.0/lib/rainbows/fiber_pool.rb0000644000004100000410000000266712641135250020441 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'rainbows/fiber'

# A Fiber-based concurrency model for Ruby 1.9.  This uses a pool of
# Fibers to handle client IO to run the application and the root Fiber
# for scheduling and connection acceptance.
#
# This concurrency model is difficult to use with existing applications,
# lacks third-party support, and is thus NOT recommended.
#
# The pool size is equal to the number of +worker_connections+.
# Compared to the ThreadPool model, Fibers are very cheap in terms of
# memory usage so you can have more active connections.  This model
# supports a streaming "rack.input" with lightweight concurrency.
# Applications are strongly advised to wrap all slow IO objects
# (sockets, pipes) using the Rainbows::Fiber::IO class whenever
# possible.
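#
# For example (a sketch only; +host+ and +port+ are placeholders), a
# client socket used inside the application may be wrapped so reads
# and writes yield the current Fiber instead of blocking the process:
#
#   require 'rainbows/fiber/io'
#   io = Rainbows::Fiber::IO.new(TCPSocket.new(host, port))
#   io.write("GET / HTTP/1.0\r\n\r\n")
#   buf = io.readpartial(16384) # yields the Fiber while waiting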
module Rainbows::FiberPool
  include Rainbows::Fiber::Base

  def worker_loop(worker) # :nodoc:
    init_worker_process(worker)
    pool = []
    worker_connections.times {
      Fiber.new {
        process(Fiber.yield) while pool << Fiber.current
      }.resume # resume to hit Fiber.yield so it waits on a client
    }
    Rainbows::Fiber::Base.setup(self.class, app)

    begin
      schedule do |l|
        fib = pool.pop or break # let another worker process take it
        if io = l.kgio_tryaccept
          fib.resume(io)
        else
          pool << fib
        end
      end
    rescue => e
      Rainbows::Error.listen_loop(e)
    end while Rainbows.cur_alive
  end
end
rainbows-5.0.0/lib/rainbows/rev_thread_spawn.rb0000644000004100000410000000164512641135250021647 0ustar  www-datawww-data# -*- encoding: binary -*-
Rainbows.const_set(:RevThreadSpawn, Rainbows::CoolioThreadSpawn)

# CoolioThreadSpawn is the new version of this, use that instead.
#
# A combination of the Rev and ThreadSpawn models.  This allows Ruby
# Thread-based concurrency for application processing.  It DOES NOT
# expose a streamable "rack.input" for upload processing within the
# app.  DevFdResponse should be used with this class to proxy
# asynchronous responses.  All network I/O between the client and
# server are handled by the main thread and outside of the core
# application dispatch.
#
# Unlike ThreadSpawn, Rev makes this model highly suitable for
# slow clients and applications with medium-to-slow response times
# (I/O bound), but less suitable for sleepy applications.
#
# This concurrency model is designed for Ruby 1.9, and Ruby 1.8
# users are NOT advised to use this due to high CPU usage.
module Rainbows::RevThreadSpawn; end
rainbows-5.0.0/lib/rainbows/ev_core.rb0000644000004100000410000000777212641135250017745 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# base module for evented models like Rev and EventMachine
module Rainbows::EvCore
  include Rainbows::Const
  include Rainbows::Response
  NULL_IO = Unicorn::HttpRequest::NULL_IO
  HttpParser = Rainbows::HttpParser
  autoload :CapInput, 'rainbows/ev_core/cap_input'
  RBUF = ""
  Rainbows.config!(self, :client_header_buffer_size)

  # Apps may return this Rack response: AsyncResponse = [ -1, {}, [] ]

  def write_async_response(response)
    status, headers, body = response
    if alive = @hp.next?
      # we can't do HTTP keepalive without Content-Length or
      # "Transfer-Encoding: chunked", and the async.callback stuff
      # isn't Rack::Lint-compatible, so we have to enforce it here.
      headers = Rack::Utils::HeaderHash.new(headers) unless Hash === headers
      alive = headers.include?('Content-Length'.freeze) ||
              !!(%r{\Achunked\z}i =~ headers['Transfer-Encoding'.freeze])
    end
    @deferred = nil
    ev_write_response(status, headers, body, alive)
  end

  def post_init
    @hp = HttpParser.new
    @env = @hp.env
    @buf = @hp.buf
    @state = :headers # [ :body [ :trailers ] ] :app_call :close
  end

  # graceful exit, like SIGQUIT
  def quit
    @state = :close
  end

  def want_more
  end

  def handle_error(e)
    msg = Rainbows::Error.response(e) and write(msg)
    ensure
      quit
  end

  # returns whether to enable response chunking for autochunk models
  # returns nil if request was hijacked in response stage
  def stream_response_headers(status, headers, alive, body)
    headers = Rack::Utils::HeaderHash.new(headers) unless Hash === headers
    if headers.include?('Content-Length'.freeze)
      write_headers(status, headers, alive, body) or return
      return false
    end

    case @env['HTTP_VERSION']
    when "HTTP/1.0" # disable HTTP/1.0 keepalive to stream
      write_headers(status, headers, false, body) or return
      @hp.clear
      false
    when nil # "HTTP/0.9"
      false
    else
      rv = !!(headers['Transfer-Encoding'] =~ %r{\Achunked\z}i)
      rv = false unless @env["rainbows.autochunk"]
      write_headers(status, headers, alive, body) or return
      rv
    end
  end

  def prepare_request_body
    # since we don't do streaming input, we have no choice but
    # to take over 100-continue handling from the Rack application
    if @env['HTTP_EXPECT'] =~ /\A100-continue\z/i
      write("HTTP/1.1 100 Continue\r\n\r\n".freeze)
      @env.delete('HTTP_EXPECT'.freeze)
    end
    @input = mkinput
    @hp.filter_body(@buf2 = "", @buf)
    @input << @buf2
    on_read(''.freeze)
  end

  # TeeInput doesn't map too well to this right now...
  def on_read(data)
    case @state
    when :headers
      @hp.add_parse(data) or return want_more
      @state = :body
      if 0 == @hp.content_length
        app_call NULL_IO # common case
      else # nil or len > 0
        prepare_request_body
      end
    when :body
      if @hp.body_eof?
        if @hp.content_length
          @input.rewind
          app_call @input
        else
          @state = :trailers
          on_read(data)
        end
      elsif data.size > 0
        @hp.filter_body(@buf2, @buf << data)
        @input << @buf2
        on_read(''.freeze)
      else
        want_more
      end
    when :trailers
      if @hp.add_parse(data)
        @input.rewind
        app_call @input
      else
        want_more
      end
    end
    rescue => e
      handle_error(e)
  end

  def err_413(msg)
    write("HTTP/1.1 413 Request Entity Too Large\r\n\r\n".freeze)
    quit
    # zip back up the stack
    raise IOError, msg, []
  end

  TmpIO = Unicorn::TmpIO
  CBB = Unicorn::TeeInput.client_body_buffer_size

  def io_for(bytes)
    bytes <= CBB ? StringIO.new("") : TmpIO.new
  end

  def mkinput
    max = Rainbows.server.client_max_body_size
    len = @hp.content_length
    if len
      if max && (len > max)
        err_413("Content-Length too big: #{len} > #{max}")
      end
      io_for(len)
    else
      max ? CapInput.new(io_for(max), self, max) : TmpIO.new
    end
  end
end
rainbows-5.0.0/lib/rainbows/client.rb0000644000004100000410000000031212641135250017560 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:

# this class is used for most synchronous concurrency models
class Rainbows::Client < Kgio::Socket
  include Rainbows::ProcessClient

  alias write kgio_write
end
rainbows-5.0.0/lib/rainbows/coolio.rb0000644000004100000410000000334412641135250017576 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'rainbows/coolio_support'

# Implements a basic single-threaded event model with
# {Cool.io}[http://coolio.github.com/].  It is capable of handling
# thousands of simultaneous client connections, but with only a
# single-threaded app dispatch.  It is suited for slow clients and
# fast applications (applications that do not have slow network
# dependencies) or applications that use DevFdResponse for deferrable
# response bodies.  It does not require your Rack application to be
# thread-safe, reentrancy is only required for the DevFdResponse body
# generator.
#
# Compatibility: Whatever Cool.io itself supports, currently Ruby
# 1.8/1.9.
#
# This model does not implement a streaming "rack.input", which
# allows the Rack application to process data as it arrives.  This
# means "rack.input" will be fully buffered in memory or to a
# temporary file before the application is entered.
#
# This model is mostly compatible with users of "async.callback" in
# the Rack environment as long as they do not depend on EventMachine.
#
# === RubyGem Requirements
# * cool.io 1.0.0 or later
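#
# === Example
#
# Deferrable response bodies backed by a pipe or socket may be proxied
# with the DevFdResponse middleware in config.ru (a sketch only;
# +MyStreamingApp+ is a placeholder for an app returning such bodies):
#
#   use Rainbows::DevFdResponse
#   run MyStreamingApp.new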
module Rainbows::Coolio
  # :stopdoc:
  # keep-alive timeout scoreboard
  KATO = {}.compare_by_identity

  # all connected clients
  CONN = {}.compare_by_identity

  autoload :Client, 'rainbows/coolio/client'
  autoload :Master, 'rainbows/coolio/master'
  autoload :ThreadClient, 'rainbows/coolio/thread_client'
  autoload :ResponsePipe, 'rainbows/coolio/response_pipe'
  autoload :ResponseChunkPipe, 'rainbows/coolio/response_chunk_pipe'
  autoload :Heartbeat, 'rainbows/coolio/heartbeat'
  # :startdoc:
end
# :enddoc:
require 'rainbows/coolio/server'
require 'rainbows/coolio/core'
Rainbows::Coolio.__send__ :include, Rainbows::Coolio::Core
rainbows-5.0.0/lib/rainbows/fiber/0000755000004100000410000000000012641135250017050 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/fiber/coolio/0000755000004100000410000000000012641135250020334 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/fiber/coolio/methods.rb0000644000004100000410000000171612641135250022331 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::Fiber::Coolio::Methods
  class Watcher < Coolio::IOWatcher
    def initialize(fio, flag)
      @f = Fiber.current
      super(fio, flag)
      attach(Coolio::Loop.default)
    end

    def on_readable
      @f.resume
    end

    alias on_writable on_readable
  end

  def close
    @w.detach if defined?(@w) && @w.attached?
    @r.detach if defined?(@r) && @r.attached?
    super
  end

  def kgio_wait_writable
    @w = Watcher.new(self, :w) unless defined?(@w)
    @w.enable unless @w.enabled?
    Fiber.yield
    @w.disable
  end

  def kgio_wait_readable(timeout = nil)
    @r = Watcher.new(self, :r) unless defined?(@r)
    @r.enable unless @r.enabled?
    Fiber.yield
    @r.disable
  end
end

[
  Rainbows::Fiber::IO,
  # the next two trigger autoload, ugh, oh well...
  Rainbows::Fiber::IO::Socket,
  Rainbows::Fiber::IO::Pipe
].each do |klass|
  klass.__send__(:include, Rainbows::Fiber::Coolio::Methods)
end
rainbows-5.0.0/lib/rainbows/fiber/coolio/heartbeat.rb0000644000004100000410000000065712641135250022630 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::Fiber::Coolio::Heartbeat < Coolio::TimerWatcher
  # ZZ gets populated by read_expire in rainbows/fiber/io/methods
  ZZ = Rainbows::Fiber::ZZ
  def on_timer
    exit if (! Rainbows.tick && Rainbows.cur <= 0)
    now = Rainbows.now
    fibs = []
    ZZ.delete_if { |fib, time| now >= time ? fibs << fib : ! fib.alive? }
    fibs.each { |fib| fib.resume if fib.alive? }
  end
end
rainbows-5.0.0/lib/rainbows/fiber/coolio/sleeper.rb0000644000004100000410000000041712641135250022322 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::Fiber::Coolio::Sleeper < Coolio::TimerWatcher

  def initialize(seconds)
    @f = Fiber.current
    super(seconds, false)
    attach(Coolio::Loop.default)
    Fiber.yield
  end

  def on_timer
    @f.resume
  end
end
rainbows-5.0.0/lib/rainbows/fiber/coolio/server.rb0000644000004100000410000000072012641135250022166 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::Fiber::Coolio::Server < Coolio::IOWatcher
  def to_io
    @io
  end

  def initialize(io)
    @io = io
    super(self, :r)
  end

  def close
    detach if attached?
    @io.close
  end

  def on_readable
    return if Rainbows.cur >= MAX
    c = @io.kgio_tryaccept and Fiber.new { process(c) }.resume
  end

  def process(io)
    Rainbows.cur += 1
    io.process_loop
  ensure
    Rainbows.cur -= 1
  end
end
rainbows-5.0.0/lib/rainbows/fiber/io/0000755000004100000410000000000012641135250017457 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/fiber/io/compat.rb0000644000004100000410000000040212641135250021263 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
#
# Used to make Rainbows::Fiber::IO behave like 0.97.0 and earlier
module Rainbows::Fiber::IO::Compat
  def initialize(io, fiber = Fiber.current)
    @to_io, @f = io, fiber
  end

  def close
    @to_io.close
  end
end
rainbows-5.0.0/lib/rainbows/fiber/io/methods.rb0000644000004100000410000000207212641135250021450 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:

# this is used to augment Kgio::Socket and Kgio::Pipe-enhanced classes
# for use with Rainbows!  Do no use this directly, see
# Rainbows::Fiber::IO::Pipe and Rainbows::Fiber::IO::Socket instead.
module Rainbows::Fiber::IO::Methods
  RD = Rainbows::Fiber::RD
  WR = Rainbows::Fiber::WR
  ZZ = Rainbows::Fiber::ZZ
  attr_accessor :f

  def read_expire
    ZZ[Fiber.current] = super
  end

  # for wrapping output response bodies
  def each
    if buf = kgio_read(16384)
      yield buf
      yield buf while kgio_read(16384, buf)
    end
    self
  end

  def close
    fd = fileno
    RD[fd] = WR[fd] = nil
    super
  end

  def kgio_wait_readable(timeout = nil)
    fd = fileno
    @f = Fiber.current
    RD[fd] = self
    Fiber.yield
    ZZ.delete @f
    RD[fd] = nil
  end

  def kgio_wait_writable
    fd = fileno
    @f = Fiber.current
    WR[fd] = self
    Fiber.yield
    WR[fd] = nil
  end

  def self.included(klass)
    if klass.method_defined?(:kgio_write)
      klass.__send__(:alias_method, :write, :kgio_write)
    end
  end
end
rainbows-5.0.0/lib/rainbows/fiber/io/socket.rb0000644000004100000410000000105512641135250021275 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
#
# A Fiber-aware Socket class, gives users the illusion of a synchronous
# interface that yields away from the current Fiber whenever
# the underlying descriptor is blocked on reads or write.
#
# It's not recommended to use any of this in your applications
# unless you're willing to accept breakage.  Most of this is very
# difficult-to-use, fragile and we don't have much time to devote to
# supporting these in the future.
class Rainbows::Fiber::IO::Socket < Kgio::Socket
  include Rainbows::Fiber::IO::Methods
end
rainbows-5.0.0/lib/rainbows/fiber/io/pipe.rb0000644000004100000410000000104712641135250020743 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
#
# A Fiber-aware Pipe class, gives users the illusion of a synchronous
# interface that yields away from the current Fiber whenever
# the underlying descriptor is blocked on reads or write.
#
# It's not recommended to use any of this in your applications
# unless you're willing to accept breakage.  Most of this is very
# difficult-to-use, fragile and we don't have much time to devote to
# supporting these in the future.
class Rainbows::Fiber::IO::Pipe < Kgio::Pipe
  include Rainbows::Fiber::IO::Methods
end
rainbows-5.0.0/lib/rainbows/fiber/coolio.rb0000644000004100000410000000055212641135250020663 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
require 'rainbows/coolio_support'
require 'rainbows/fiber'
require 'rainbows/fiber/io'

module Rainbows::Fiber::Coolio
  autoload :Heartbeat, 'rainbows/fiber/coolio/heartbeat'
  autoload :Server, 'rainbows/fiber/coolio/server'
  autoload :Sleeper, 'rainbows/fiber/coolio/sleeper'
end
require 'rainbows/fiber/coolio/methods'
rainbows-5.0.0/lib/rainbows/fiber/base.rb0000644000004100000410000000367012641135250020315 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
require 'rainbows/fiber/io'

module Rainbows::Fiber::Base

  include Rainbows::Base

  # :stopdoc:
  RD = Rainbows::Fiber::RD
  WR = Rainbows::Fiber::WR
  ZZ = Rainbows::Fiber::ZZ
  # :startdoc:

  # the scheduler method that powers both FiberSpawn and FiberPool
  # concurrency models.  It times out idle clients and attempts to
  # schedule the ones that were blocked on I/O.  At most it'll sleep
  # for one second (as returned by the schedule_sleepers method), which
  # ensures it still wakes up often enough to heartbeat with the master.
  def schedule
    begin
      Rainbows.tick
      t = schedule_sleepers
      ret = select(RD.compact.concat(LISTENERS), WR.compact, nil, t)
    rescue Errno::EINTR
      retry
    rescue Errno::EBADF, TypeError
      LISTENERS.compact!
      raise
    end or return

    # active writers first, then readers
    ret[1].concat(RD.compact & ret[0]).each { |c| c.f.resume }

    # accept is an expensive syscall, filter out listeners we don't want
    (ret[0] & LISTENERS).each { |x| yield x }
  end

  # wakes up any sleepers or keepalive-timeout violators that need to be
  # woken and returns an interval to IO.select on
  def schedule_sleepers
    max = nil
    now = Rainbows.now
    fibs = []
    ZZ.delete_if { |fib, time|
      if now >= time
        fibs << fib
      else
        max = time
        false
      end
    }
    fibs.each(&:resume)

    max_sleep = 1.0 # wake up semi-frequently to prevent SIGKILL from master
    if max
      max -= Rainbows.now
      return 0 if max < 0.0
      return max_sleep if max > max_sleep
      max
    else
      max_sleep
    end
  end

  def process(client)
    Rainbows.cur += 1
    client.process_loop
  ensure
    Rainbows.cur -= 1
    ZZ.delete(client.f)
  end

  def self.setup(klass, app)
    Rainbows::Client.__send__(:include, Rainbows::Fiber::IO::Methods)
    require 'rainbows/fiber/body'
    Rainbows::Client.__send__(:include, Rainbows::Fiber::Body)
    self.const_set(:APP, app)
  end
end
rainbows-5.0.0/lib/rainbows/fiber/body.rb0000644000004100000410000000162012641135250020331 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# non-portable body handling for Fiber-based concurrency goes here
# this module is required and included in worker processes only
# this is meant to be included _after_ Rainbows::Response::Body
module Rainbows::Fiber::Body # :nodoc:

  # the sendfile 1.1.0+ gem includes IO#trysendfile
  if IO.method_defined?(:trysendfile)
    def write_body_file(body, range)
      sock, n, body = to_io, nil, body_to_io(body)
      offset, count = range ? range : [ 0, body.stat.size ]
      case n = sock.trysendfile(body, offset, count)
      when Integer
        offset += n
        return if 0 == (count -= n)
      when :wait_writable
        kgio_wait_writable
      else # nil
        return
      end while true
      ensure
        close_if_private(body)
    end
  end

  def self.included(klass)
    klass.__send__ :alias_method, :write_body_stream, :write_body_each
  end
end
rainbows-5.0.0/lib/rainbows/fiber/io.rb0000644000004100000410000000512712641135250020011 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# A \Fiber-aware IO class, gives users the illusion of a synchronous
# interface that yields away from the current \Fiber whenever
# the underlying descriptor is blocked on reads or write
#
# It's not recommended to use any of this in your applications
# unless you're willing to accept breakage.  Most of this is very
# difficult-to-use, fragile and we don't have much time to devote to
# supporting these in the future.
#
# This is a stable, legacy interface and should be preserved for all
# future versions of Rainbows!  However, new apps should use
# Rainbows::Fiber::IO::Socket or Rainbows::Fiber::IO::Pipe instead
# (or better yet, avoid any of the Rainbows::Fiber* stuff).
class Rainbows::Fiber::IO
  attr_accessor :to_io

  # :stopdoc:
  # see Rainbows::Fiber::IO::Compat for initialize implementation
  class << self
    alias :[] :new
  end
  # :startdoc:

  # no longer used internally within Rainbows!, only for compatibility
  def write_nonblock(buf)
    @to_io.write_nonblock(buf)
  end

  def kgio_addr
    @to_io.kgio_addr
  end

  # for wrapping output response bodies
  def each
    buf = readpartial(16384)
    yield buf
    yield buf while readpartial(16384, buf)
    rescue EOFError
      self
  end

  def closed?
    @to_io.closed?
  end

  def fileno
    @to_io.fileno
  end

  def write(buf)
    case rv = Kgio.trywrite(@to_io, buf)
    when String
      buf = rv
    when :wait_writable
      kgio_wait_writable
    end until nil == rv
  end

  # used for reading headers (respecting keepalive_timeout)
  def timed_read(buf)
    expire = nil
    case rv = Kgio.tryread(@to_io, 16384, buf)
    when :wait_readable
      return if expire && expire < Rainbows.now
      expire ||= read_expire
      kgio_wait_readable
    else
      return rv
    end while true
  end

  def readpartial(length, buf = "")
    case rv = Kgio.tryread(@to_io, length, buf)
    when nil
      raise EOFError, "end of file reached", []
    when :wait_readable
      kgio_wait_readable
    else
      return rv
    end while true
  end

  def kgio_read(*args)
    @to_io.kgio_read(*args)
  end

  def kgio_read!(*args)
    @to_io.kgio_read!(*args)
  end

  def kgio_trywrite(*args)
    @to_io.kgio_trywrite(*args)
  end

  autoload :Socket, 'rainbows/fiber/io/socket'
  autoload :Pipe, 'rainbows/fiber/io/pipe'
end

# :stopdoc:
require 'rainbows/fiber/io/methods'
require 'rainbows/fiber/io/compat'
class Rainbows::Fiber::IO
  include Rainbows::Fiber::IO::Compat
  include Rainbows::Fiber::IO::Methods
  alias_method :wait_readable, :kgio_wait_readable
  alias_method :wait_writable, :kgio_wait_writable
end
rainbows-5.0.0/lib/rainbows/fiber/queue.rb0000644000004100000410000000147312641135250020526 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
#
# a self-sufficient Queue implementation for Fiber-based concurrency
# models.  This requires no external scheduler, so it may be used with
# Revactor as well as FiberSpawn and FiberPool.
class Rainbows::Fiber::Queue < Struct.new(:queue, :waiters)
  def initialize(queue = [], waiters = [])
    # move elements of the Queue into an Array
    if queue.class.name == "Queue"
      queue = queue.length.times.map { queue.pop }
    end
    super queue, waiters
  end

  def shift
    # ah the joys of not having to deal with race conditions
    if queue.empty?
      waiters << Fiber.current
      Fiber.yield
    end
    queue.shift
  end

  def <<(obj)
    queue << obj
    blocked = waiters.shift and blocked.resume
    queue # not quite 100% compatible but no-one's looking :>
  end
end
rainbows-5.0.0/lib/rainbows/thread_spawn.rb0000644000004100000410000000320712641135250020767 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'thread'

# Spawns a new thread for every client connection we accept().  This
# model is recommended for platforms like Ruby (MRI) 1.8 where spawning
# new threads is inexpensive, but still seems to work well enough with
# good native threading implementations such as NPTL under Linux on
# Ruby (MRI/YARV) 1.9
#
# This model should provide a high level of compatibility with all Ruby
# implementations, and most libraries and applications.  Applications
# running under this model should be thread-safe but not necessarily
# reentrant.
#
# If you're using green threads (MRI 1.8) and need to perform DNS lookups,
# consider using the "resolv-replace" library which replaces parts of the
# core Socket package with concurrent DNS lookup capabilities.
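#
# For example, a configuration sketch combining the suggestions above
# (the +worker_connections+ value is illustrative only):
#
#   require 'resolv-replace' # only needed for MRI 1.8 green threads
#
#   Rainbows! do
#     use :ThreadSpawn
#     worker_connections 100
#   end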

module Rainbows::ThreadSpawn
  include Rainbows::Base
  include Rainbows::WorkerYield

  def accept_loop(klass) #:nodoc:
    lock = Mutex.new
    limit = worker_connections
    nr = 0
    LISTENERS.each do |l|
      klass.new do
        begin
          if lock.synchronize { nr >= limit }
            worker_yield
          elsif client = l.kgio_accept
            klass.new(client) do |c|
              begin
                lock.synchronize { nr += 1 }
                c.process_loop
              ensure
                lock.synchronize { nr -= 1 }
              end
            end
          end
        rescue => e
          Rainbows::Error.listen_loop(e)
        end while Rainbows.alive
      end
    end
    sleep 1 while Rainbows.tick || lock.synchronize { nr > 0 }
  end

  def worker_loop(worker) #:nodoc:
    init_worker_process(worker)
    accept_loop(Thread)
  end
end
rainbows-5.0.0/lib/rainbows/version.rb0000644000004100000410000000005412641135250017772 0ustar  www-datawww-dataRainbows::Const::RAINBOWS_VERSION = '5.0.0'
rainbows-5.0.0/lib/rainbows/fiber.rb0000644000004100000410000000226012641135250017375 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
begin
  require 'fiber'
rescue LoadError
  defined?(NeverBlock) or raise
end

# core namespace for all things that use Fibers in \Rainbows!
#
# It's generally not recommended to use any of this in your applications
# unless you're willing to accept breakage.  Most of this is very
# difficult-to-use, fragile and we don't have much time to devote to
# supporting these in the future.
module Rainbows::Fiber

  # blocked readers (key: fileno, value: Rainbows::Fiber::IO object)
  RD = []

  # blocked writers (key: fileno, value: Rainbows::Fiber::IO object)
  WR = []

  # sleeping fibers go here (key: Fiber object, value: wakeup time)
  ZZ = {}

  # puts the current Fiber into uninterruptible sleep for at least
  # +seconds+.  Unlike Kernel#sleep, it is not possible to sleep
  # indefinitely and be woken up later (nobody wants that in a web server,
  # right?).  Calling this directly is deprecated, use
  # Rainbows.sleep(seconds) instead.
  def self.sleep(seconds)
    ZZ[Fiber.current] = Rainbows.now + seconds
    Fiber.yield
  end

  autoload :Base, 'rainbows/fiber/base'
  autoload :Queue, 'rainbows/fiber/queue'
  autoload :IO, 'rainbows/fiber/io'
end
rainbows-5.0.0/lib/rainbows/http_server.rb0000644000004100000410000000622712641135250020662 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:

class Rainbows::HttpServer < Unicorn::HttpServer
  attr_accessor :copy_stream
  attr_accessor :worker_connections
  attr_accessor :keepalive_timeout
  attr_accessor :client_header_buffer_size
  attr_accessor :client_max_body_size
  attr_reader :use
  attr_reader :master_pid

  def self.setup(block)
    Rainbows.server.instance_eval(&block)
  end

  def initialize(app, options)
    Rainbows.server = self
    @logger = Unicorn::Configurator::DEFAULTS[:logger]
    super(app, options)
    defined?(@use) or self.use = Rainbows::Base
    @worker_connections ||= 50
    @worker_connections = 1 if @use == :Base
  end

  # Add one second to the timeout since our fchmod heartbeat is less
  # precise (and must be more conservative) than Unicorn's.  We
  # handle many clients per process and can't chmod on every
  # connection we accept without wasting cycles.  That, combined with the
  # fact that we let clients keep idle connections open for long
  # periods of time, means we have to chmod at a fixed interval.
  def timeout=(nr)
    @timeout = nr + 1
  end

  def load_config!
    super
    @worker_connections = 1 if @use == :Base
  end

  def worker_loop(worker)
    Rainbows.forked = true
    orig = method(:worker_loop)
    extend(Rainbows.const_get(@use))
    m = method(:worker_loop)
    orig == m ? super(worker) : worker_loop(worker)
  end

  def spawn_missing_workers
    # 5: std{in,out,err} + heartbeat FD + per-process listener
    nofile = 5 + @worker_connections + LISTENERS.size
    trysetrlimit(:RLIMIT_NOFILE, nofile)

    case @use
    when :ThreadSpawn, :ThreadPool, :ActorSpawn,
         :CoolioThreadSpawn, :RevThreadSpawn,
         :XEpollThreadSpawn, :WriterThreadPool, :WriterThreadSpawn
      trysetrlimit(:RLIMIT_NPROC, @worker_connections + LISTENERS.size + 1)
    when :XEpollThreadPool, :CoolioThreadPool
      trysetrlimit(:RLIMIT_NPROC, Rainbows::O[:pool_size] + LISTENERS.size + 1)
    end
    super
  end

  def trysetrlimit(resource, want)
    var = Process.const_get(resource)
    cur, max = Process.getrlimit(var)
    cur <= want and Process.setrlimit(var, cur = max > want ? max : want)
    if cur == want
      @logger.warn "#{resource} rlim_cur=#{cur} is barely enough"
      @logger.warn "#{svc} may monopolize resources dictated by #{resource}" \
                   " and leave none for your app"
    end
    rescue => e
      @logger.error e.message
      @logger.error "#{resource} needs to be increased to >=#{want} before" \
                    " starting #{svc}"
  end

  def svc
    File.basename($0)
  end

  def use=(mod)
    @use = mod.to_s.split('::')[-1].to_sym
    new_defaults = {
      'rainbows.model' => @use,
      'rack.multithread' => mod.to_s.include?('Thread'),
      'rainbows.autochunk' => [:Coolio,:Rev,:Epoll,:XEpoll,
                               :EventMachine,:NeverBlock].include?(@use),
    }
    Rainbows::Const::RACK_DEFAULTS.update(new_defaults)
  end

  def keepalive_requests=(nr)
    Rainbows::HttpParser.keepalive_requests = nr
  end

  def keepalive_requests
    Rainbows::HttpParser.keepalive_requests
  end

  def client_max_header_size=(bytes)
    Unicorn::HttpParser.max_header_len = bytes
  end
end
rainbows-5.0.0/lib/rainbows/writer_thread_pool/0000755000004100000410000000000012641135250021655 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/writer_thread_pool/client.rb0000644000004100000410000000325712641135250023467 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# used to wrap a BasicSocket to use with +q+ for all writes
# this is compatible with IO.select
class Rainbows::WriterThreadPool::Client < Struct.new(:to_io, :q)
  include Rainbows::SocketProxy
  include Rainbows::ProcessClient

  module Methods
    def write_body_each(body)
      return if @hp.hijacked?
      q << [ to_io, :write_body_each, body ]
    end

    def write_response_close(status, headers, body, alive)
      to_io.instance_variable_set(:@hp, @hp) # XXX ugh
      return if @hp.hijacked?
      Rainbows::SyncClose.new(body) { |sync_body|
        q << [ to_io, :write_response, status, headers, sync_body, alive ]
      }
    end

    if Rainbows::Response::COPY_STREAM || IO.method_defined?(:trysendfile)
      def write_response(status, headers, body, alive)
        if body.respond_to?(:close)
          write_response_close(status, headers, body, alive)
        elsif body.respond_to?(:to_path)
          write_response_path(status, headers, body, alive)
        else
          super
        end
      end

      def write_body_file(body, range)
        q << [ to_io, :write_body_file, body, range ]
      end

      def write_body_stream(body)
        q << [ to_io, :write_body_stream, body ]
      end
    else # each-only body response
      def write_response(status, headers, body, alive)
        if body.respond_to?(:close)
          write_response_close(status, headers, body, alive)
        else
          super
        end
      end
    end # each-only body response
  end # module Methods
  include Methods

  def write(buf)
    q << [ to_io, buf ]
  end

  def close
    q << [ to_io, :close ]
  end

  def closed?
    to_io.closed?
  end
end
rainbows-5.0.0/lib/rainbows/sendfile.rb0000644000004100000410000000506212641135250020102 0ustar  www-datawww-data# -*- encoding: binary -*-
# This middleware handles X-\Sendfile headers generated by applications
# or middlewares down the stack.  It should be placed at the top
# (outermost layer) of the middleware stack to avoid having its
# +to_path+ method clobbered by another middleware.
#
# This converts X-\Sendfile responses to bodies which respond to the
# +to_path+ method which allows certain concurrency models to serve
# efficiently using sendfile() or similar.  With multithreaded models
# under Ruby 1.9, IO.copy_stream will be used.
#
# This middleware is the opposite of Rack::Sendfile as it
# reverses the effect of Rack::Sendfile.  Unlike many Ruby
# web servers, some configurations of \Rainbows! are capable of
# serving static files efficiently.
#
# === Compatibility (via IO.copy_stream in Ruby 1.9):
# * ThreadSpawn
# * ThreadPool
# * WriterThreadPool
# * WriterThreadSpawn
#
# === Compatibility (Ruby 1.8 and 1.9)
# * EventMachine
# * NeverBlock (using EventMachine)
#
# DO NOT use this middleware if you're proxying to \Rainbows! with a
# server that understands X-\Sendfile (e.g. Apache, Lighttpd) natively.
#
# This does NOT understand X-Accel-Redirect headers intended for nginx.
# X-Accel-Redirect requires the application to be highly coupled with
# the corresponding nginx configuration, and is thus too complicated to
# be worth supporting.
#
# Example config.ru:
#
#    use Rainbows::Sendfile
#    run lambda { |env|
#      path = "#{Dir.pwd}/random_blob"
#      [ 200,
#        {
#          'X-Sendfile' => path,
#          'Content-Type' => 'application/octet-stream'
#        },
#        []
#      ]
#    }

class Rainbows::Sendfile < Struct.new(:app)

  # Body wrapper; this allows us to fall back gracefully to
  # +each+ in case a given concurrency model does not optimize
  # +to_path+ calls.
  class Body < Struct.new(:to_path) # :nodoc: all
    def self.new(path, headers)
      unless headers['Content-Length'.freeze]
        stat = File.stat(path)
        headers['Content-Length'.freeze] = stat.size.to_s if stat.file?
      end
      super(path)
    end

    # fallback in case our +to_path+ doesn't get handled for whatever reason
    def each
      buf = ''
      File.open(to_path) do |fp|
        yield buf while fp.read(0x4000, buf)
      end
    end
  end

  def call(env) # :nodoc:
    status, headers, body = app.call(env)
    headers = Rack::Utils::HeaderHash.new(headers) unless Hash === headers
    if path = headers.delete('X-Sendfile'.freeze)
      body = Body.new(path, headers) unless body.respond_to?(:to_path)
    end
    [ status, headers, body ]
  end
end
rainbows-5.0.0/lib/rainbows/rev.rb0000644000004100000410000000172312641135250017105 0ustar  www-datawww-data# -*- encoding: binary -*-
Rainbows.const_set(:Rev, Rainbows::Coolio)
# Coolio is the new version of this, use that instead.
#
# Implements a basic single-threaded event model with Rev.
# It is capable of handling
# thousands of simultaneous client connections, but with only a
# single-threaded app dispatch.  It is suited for slow clients and
# fast applications (applications that do not have slow network
# dependencies) or applications that use DevFdResponse for deferrable
# response bodies.  It does not require your Rack application to be
# thread-safe; reentrancy is only required for the DevFdResponse body
# generator.
#
# Compatibility: Whatever \Rev itself supports, currently Ruby
# 1.8/1.9.
#
# This model does not implement a streaming "rack.input", which
# would allow the Rack application to process data as it arrives.  This
# means "rack.input" will be fully buffered in memory or to a
# temporary file before the application is entered.
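#
# A minimal config sketch (the connection count is illustrative; :Rev is
# only an alias kept for backwards compatibility, prefer :Coolio):
#
#   Rainbows! do
#     use :Rev
#     worker_connections 400
#   end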
module Rainbows::Rev; end
rainbows-5.0.0/lib/rainbows/ev_core/0000755000004100000410000000000012641135250017403 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/ev_core/cap_input.rb0000644000004100000410000000074012641135250021713 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::EvCore::CapInput

  def initialize(io, client, max)
    @io, @client, @bytes_left = io, client, max
  end

  def <<(buf)
    if (@bytes_left -= buf.size) < 0
      @io.close
      @client.err_413("chunked request body too big")
    end
    @io << buf
  end

  def gets; @io.gets; end
  def each; @io.each { |x| yield x }; end
  def size; @io.size; end
  def rewind; @io.rewind; end
  def read(*args); @io.read(*args); end
end
rainbows-5.0.0/lib/rainbows/join_threads.rb0000644000004100000410000000104212641135250020754 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# This module only gets loaded on shutdown
module Rainbows::JoinThreads

  # blocking acceptor threads must be forced to run
  def self.acceptors(threads)
    expire = Rainbows.now + Rainbows.server.timeout
    threads.delete_if do |thr|
      Rainbows.tick
      begin
        # blocking accept() may not wake up properly
        thr.raise(Errno::EINTR) if Rainbows.now > expire && thr.stop?

        thr.run
        thr.join(0.01)
      rescue
        true
      end
    end until threads.empty?
  end
end
rainbows-5.0.0/lib/rainbows/max_body.rb0000644000004100000410000000521612641135250020114 0ustar  www-datawww-data# -*- encoding: binary -*-

# Middleware used to enforce client_max_body_size for TeeInput users.
#
# There is no need to configure this middleware manually, it will
# automatically be configured for you based on the client_max_body_size
# setting.
#
# For more fine-grained control, you may also define it per-endpoint in
# your Rack config.ru like this:
#
#        map "/limit_1M" do
#          use Rainbows::MaxBody, 1024*1024
#          run MyApp
#        end
#        map "/limit_10M" do
#          use Rainbows::MaxBody, 1024*1024*10
#          run MyApp
#        end
#
# This is only compatible with concurrency models that expose a streaming
# "rack.input" to the Rack application.  Thus it is NOT compatible with
# any of the following as they fully buffer the request body before
# the application dispatch:
#
# * :Coolio
# * :CoolioThreadPool
# * :CoolioThreadSpawn
# * :Epoll
# * :EventMachine
# * :NeverBlock
# * :Rev
# * :RevThreadPool
# * :RevThreadSpawn
# * :XEpoll
#
# However, the global Rainbows::Configurator#client_max_body_size is compatible
# with all concurrency models \Rainbows! supports.
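#
# A sketch of the global setting in a \Rainbows! config file (the model
# and the 1 megabyte limit shown here are illustrative, not requirements):
#
#   Rainbows! do
#     use :ThreadPool
#     client_max_body_size 1024 * 1024
#   end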
class Rainbows::MaxBody

  # This is automatically called when used with Rack::Builder#use
  def initialize(app, limit = nil)
    case limit
    when Integer, nil
    else
      raise ArgumentError, "limit not an Integer"
    end
    @app, @limit = app, limit
  end

  # our main Rack middleware endpoint
  def call(env)
    @limit = Rainbows.server.client_max_body_size if nil == @limit
    catch(:rainbows_EFBIG) do
      len = env['CONTENT_LENGTH']
      if len && len.to_i > @limit
        return err
      elsif /\Achunked\z/i =~ env['HTTP_TRANSFER_ENCODING']
        limit_input!(env)
      end
      @app.call(env)
    end || err
  end

  # this is called after forking, so it won't ever affect the master
  # if it's reconfigured
  def self.setup # :nodoc:
    Rainbows.server.client_max_body_size or return
    case Rainbows.server.use
    when :Rev, :Coolio, :EventMachine, :NeverBlock,
         :RevThreadSpawn, :RevThreadPool,
         :CoolioThreadSpawn, :CoolioThreadPool,
         :Epoll, :XEpoll
      return
    end

    # force ourselves to the outermost middleware layer
    Rainbows.server.app = self.new(Rainbows.server.app)
  end

  # Rack response returned when there's an error
  def err # :nodoc:
    [ 413, { 'Content-Length' => '0', 'Content-Type' => 'text/plain' }, [] ]
  end

  def limit_input!(env)
    input = env['rack.input']
    klass = input.respond_to?(:rewind) ? RewindableWrapper : Wrapper
    env['rack.input'] = klass.new(input, @limit)
  end

  # :startdoc:
end
require 'rainbows/max_body/wrapper'
require 'rainbows/max_body/rewindable_wrapper'
rainbows-5.0.0/lib/rainbows/coolio_fiber_spawn.rb0000644000004100000410000000226512641135250022156 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'rainbows/fiber/coolio'

# A combination of the Coolio and FiberSpawn models.
#
# This concurrency model is difficult to use with existing applications,
# lacks third-party support, and is thus NOT recommended.
#
# This allows Ruby 1.9 Fiber-based concurrency for application
# processing while exposing a synchronous execution model and using
# scalable network concurrency provided by Cool.io.  A streaming
# "rack.input" is exposed.  Applications are strongly advised to wrap
# all slow IO objects (sockets, pipes) using the Rainbows::Fiber::IO or
# a Cool.io-compatible class whenever possible.
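#
# A config sketch for the curious (the connection count is illustrative):
#
#   Rainbows! do
#     use :CoolioFiberSpawn
#     worker_connections 100
#   end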
module Rainbows::CoolioFiberSpawn

  include Rainbows::Base
  include Rainbows::Fiber::Coolio

  def worker_loop(worker) # :nodoc:
    Rainbows::Response.setup
    init_worker_process(worker)
    Server.const_set(:MAX, @worker_connections)
    Rainbows::Fiber::Base.setup(Server, nil)
    Server.const_set(:APP, Rainbows.server.app)
    Heartbeat.new(1, true).attach(Coolio::Loop.default)
    LISTENERS.map! { |s| Server.new(s).attach(Coolio::Loop.default) }
    Rainbows::Client.__send__ :include, Rainbows::Fiber::Coolio::Methods
    Coolio::Loop.default.run
  end
end
rainbows-5.0.0/lib/rainbows/coolio_thread_spawn/0000755000004100000410000000000012641135250022004 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/coolio_thread_spawn/client.rb0000644000004100000410000000037112641135250023610 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::CoolioThreadSpawn::Client < Rainbows::Coolio::ThreadClient
  # MASTER will be set in worker_loop
  def app_dispatch
    Thread.new(self) { |client| MASTER << [ client, app_response ] }
  end
end
rainbows-5.0.0/lib/rainbows/stream_file.rb0000644000004100000410000000117512641135250020604 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:

# Used to keep track of file offsets in IO#trysendfile + evented
# models.  We always maintain our own file offsets in userspace because
# because sendfile() implementations offer pread()-like idempotency for
# concurrency (multiple clients can read the same underlying file handle).
class Rainbows::StreamFile
  attr_reader :to_io
  attr_accessor :offset, :count

  def initialize(offset, count, io, body)
    @offset, @count, @to_io, @body = offset, count, io, body
  end

  def close
    @body.close if @body.respond_to?(:close)
    @to_io.close unless @to_io.closed?
    @to_io = nil
  end
end
rainbows-5.0.0/lib/rainbows/coolio_support.rb0000644000004100000410000000034712641135250021372 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
begin
  require "coolio"
  Coolio::VERSION >= "1.0.0" or abort "cool.io >= 1.0.0 is required"
rescue LoadError
  require "rev"
  Rev::VERSION >= "0.3.0" or abort "rev >= 0.3.0 is required"
end
rainbows-5.0.0/lib/rainbows/xepoll_thread_pool.rb0000644000004100000410000000507412641135250022177 0ustar  www-datawww-data# -*- encoding: binary -*-
require "thread"
require "sleepy_penguin"
require "raindrops"

# This is an edge-triggered epoll concurrency model with blocking
# accept() in a (hopefully) native thread.  This is comparable to
# ThreadPool and CoolioThreadPool, but is Linux-only and able to exploit
# "wake one" accept() behavior of a blocking accept() call when used
# with native threads.
#
# This supports streaming "rack.input" and allows +:pool_size+ tuning
# independently of +worker_connections+
#
# === Disadvantages
#
# This is only supported under Linux 2.6 and later kernels.
#
# === Compared to CoolioThreadPool
#
# This does not buffer outgoing responses in userspace at all, meaning
# it can lower response latency to fast clients and also prevent
# starvation of other clients when reading slow disks for responses
# (when combined with native threads).
#
# CoolioThreadPool is likely better for trickling large static files or
# proxying responses to slow clients, but this is likely better for fast
# clients.
#
# Unlike CoolioThreadPool, this supports streaming "rack.input" which
# is useful for reading large uploads from fast clients.
#
# This exposes no special API or extensions on top of Rack.
#
# === Compared to ThreadPool
#
# This can maintain idle connections without the memory overhead of an
# idle Thread.  The cost of handling/dispatching active connections is
# exactly the same for an equivalent number of active connections
# (but independently tunable).
#
# === :pool_size vs worker_connections
#
# Since +:pool_size+ and +worker_connections+ are independently tunable,
# it is possible to get into situations where active connections need
# to wait for an idle thread in the thread pool before being processed
#
# In your Rainbows! config block, you may specify a Thread pool size
# to limit your application concurrency independently of
# worker_connections.
#
#   Rainbows! do
#     use :XEpollThreadPool, :pool_size => 50
#     worker_connections 100
#   end
#
# In extremely rare cases, this may be combined with Rainbows::AppPool
# if you have different concurrency capabilities for different parts of
# your Rack application.
#
# === RubyGem Requirements
#
# * raindrops 0.6.0 or later
# * sleepy_penguin 3.0.1 or later
module Rainbows::XEpollThreadPool
  extend Rainbows::PoolSize

  # :stopdoc:
  include Rainbows::Base

  def init_worker_process(worker)
    super
    require "rainbows/xepoll_thread_pool/client"
    Rainbows::Client.__send__ :include, Client
  end

  def worker_loop(worker) # :nodoc:
    init_worker_process(worker)
    Client.loop
  end
  # :startdoc:
end
rainbows-5.0.0/lib/rainbows/stream_response_epoll/0000755000004100000410000000000012641135250022365 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/stream_response_epoll/client.rb0000644000004100000410000000260012641135250024166 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::StreamResponseEpoll::Client
  OUT = SleepyPenguin::Epoll::OUT
  N = Raindrops.new(1)
  EP = SleepyPenguin::Epoll.new
  timeout = Rainbows.server.timeout
  thr = Thread.new do
    begin
      EP.wait(nil, timeout) { |_,client| client.epoll_run }
    rescue Errno::EINTR
    rescue => e
      Rainbows::Error.listen_loop(e)
    end while Rainbows.alive || N[0] > 0
  end
  Rainbows.at_quit { thr.join(timeout) }

  attr_reader :to_io

  def initialize(io, unwritten)
    @finish = false
    @to_io = io
    @wr_queue = [ unwritten.dup ]
    EP.set(self, OUT)
  end

  def write(str)
    @wr_queue << str.dup
  end

  def close
    @finish = true
  end

  def hijack(hijack)
    @finish = hijack
  end

  def epoll_run
    return if @to_io.closed?
    buf = @wr_queue.shift or return on_write_complete
    case rv = @to_io.kgio_trywrite(buf)
    when nil
      buf = @wr_queue.shift or return on_write_complete
    when String # retry, socket buffer may grow
      buf = rv
    when :wait_writable
      return @wr_queue.unshift(buf)
    end while true
    rescue => err
      @to_io.close
      N.decr(0, 1)
  end

  def on_write_complete
    if true == @finish
      @to_io.shutdown
      @to_io.close
      N.decr(0, 1)
    elsif @finish.respond_to?(:call) # hijacked
      EP.delete(self)
      N.decr(0, 1)
      @finish.call(@to_io)
    end
  end
end
rainbows-5.0.0/lib/rainbows/pool_size.rb0000644000004100000410000000053312641135250020312 0ustar  www-datawww-data# -*- encoding: binary -*-
# :stopdoc:
module Rainbows::PoolSize
  DEFAULTS = {
    :pool_size => 50, # same as the default worker_connections
  }

  def setup
    o = Rainbows::O
    DEFAULTS.each { |k,v| o[k] ||= v }
    Integer === o[:pool_size] && o[:pool_size] > 0 or
      raise ArgumentError, "pool_size must be an Integer > 0"
  end
end
rainbows-5.0.0/lib/rainbows/writer_thread_spawn/0000755000004100000410000000000012641135250022034 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/writer_thread_spawn/client.rb0000644000004100000410000000502212641135250023636 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# used to wrap a BasicSocket to use with +q+ for all writes
# this is compatible with IO.select
class Rainbows::WriterThreadSpawn::Client < Struct.new(:to_io, :q, :thr)
  include Rainbows::SocketProxy
  include Rainbows::ProcessClient
  include Rainbows::WorkerYield

  CUR = {} # :nodoc:

  module Methods
    def write_body_each(body)
      q << [ :write_body_each, body ]
    end

    def write_response_close(status, headers, body, alive)
      to_io.instance_variable_set(:@hp, @hp) # XXX ugh
      Rainbows::SyncClose.new(body) { |sync_body|
        q << [ :write_response, status, headers, sync_body, alive ]
      }
    end

    if Rainbows::Response::COPY_STREAM || IO.method_defined?(:trysendfile)
      def write_response(status, headers, body, alive)
        self.q ||= queue_writer
        if body.respond_to?(:close)
          write_response_close(status, headers, body, alive)
        elsif body.respond_to?(:to_path)
          write_response_path(status, headers, body, alive)
        else
          super
        end
      end

      def write_body_file(body, range)
        q << [ :write_body_file, body, range ]
      end

      def write_body_stream(body)
        q << [ :write_body_stream, body ]
      end
    else # each-only body response
      def write_response(status, headers, body, alive)
        self.q ||= queue_writer
        if body.respond_to?(:close)
          write_response_close(status, headers, body, alive)
        else
          super
        end
      end
    end # each-only body response
  end # module Methods
  include Methods

  def self.quit
    CUR.delete_if do |t,q|
      q << nil
      Rainbows.tick
      t.alive? ? t.join(0.01) : true
    end until CUR.empty?
  end

  def queue_writer
    until CUR.size < MAX
      CUR.delete_if { |t,_|
        t.alive? ? t.join(0) : true
      }.size >= MAX and worker_yield
    end

    q = Queue.new
    self.thr = Thread.new(to_io) do |io|
      while op = q.shift
        begin
          op, *rest = op
          case op
          when String
            io.kgio_write(op)
          when :close
            io.close unless io.closed?
            break
          else
            io.__send__ op, *rest
          end
        rescue => e
          Rainbows::Error.write(io, e)
        end
      end
      CUR.delete(Thread.current)
    end
    CUR[thr] = q
  end

  def write(buf)
    (self.q ||= queue_writer) << buf
  end

  def close
    if q
      q << :close
    else
      to_io.close
    end
  end

  def closed?
    to_io.closed?
  end
end
rainbows-5.0.0/lib/rainbows/queue_pool.rb0000644000004100000410000000120012641135250020454 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
require 'thread'

# Thread pool class based on pulling off a single Ruby Queue.
# This is NOT used for the ThreadPool class, since that class does not
# need a userspace Queue.
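#
# A usage sketch (handle_job and job are hypothetical placeholders):
#
#   pool = Rainbows::QueuePool.new(4) { |job| handle_job(job) }
#   pool.queue << job   # enqueue work for one of the 4 threads
#   pool.quit!          # push nil sentinels and join the threads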
class Rainbows::QueuePool
  attr_reader :queue

  def initialize(size = 20)
    q = Queue.new
    @threads = (1..size).map do
      Thread.new do
        while job = q.shift
          yield job
        end
      end
    end
    @queue = q
  end

  def quit!
    @threads.each { |_| @queue << nil }
    @threads.delete_if do |t|
      Rainbows.tick
      t.alive? ? t.join(0.01) : true
    end until @threads.empty?
  end
end
rainbows-5.0.0/lib/rainbows/http_parser.rb0000644000004100000410000000124312641135250020641 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# avoid modifying Unicorn::HttpParser
class Rainbows::HttpParser < Unicorn::HttpParser
  @keepalive_requests = 100
  class << self
    attr_accessor :keepalive_requests
  end

  def initialize(*args)
    @keepalive_requests = self.class.keepalive_requests
    super
  end

  def next?
    return false if (@keepalive_requests -= 1) <= 0
    super
  end

  def hijack_setup(io)
    @hijack_io = io
    env['rack.hijack'] = self # avoid allocating a new proc this way
  end

  def call # for rack.hijack
    env['rack.hijack_io'] = @hijack_io
  end

  def self.quit
    alias_method :next?, :never!
  end

  def never!
    false
  end
end
rainbows-5.0.0/lib/rainbows/stream_response_epoll.rb0000644000004100000410000000645412641135250022723 0ustar  www-datawww-data# -*- encoding: binary -*-
require "sleepy_penguin"
require "raindrops"

# Like Unicorn itself, this concurrency model is only intended for use
# behind nginx and completely unsupported otherwise.  Even more so than
# Unicorn, this isn't a good idea even with normal LAN clients; only nginx!
#
# It does NOT require a thread-safe Rack application at any point, but
# allows streaming data asynchronously via nginx (using the
# "X-Accel-Buffering: no" header to disable buffering).
#
# Unlike Rainbows::Base, this does NOT support persistent
# connections or pipelining.  All \Rainbows! specific configuration
# options are ignored (except Rainbows::Configurator#use).
#
# === RubyGem Requirements
#
# * raindrops 0.6.0 or later
# * sleepy_penguin 3.0.1 or later
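#
# A minimal config sketch (assumes nginx proxies to this listener; the
# address shown is illustrative):
#
#   listen "127.0.0.1:8080"
#   Rainbows! do
#     use :StreamResponseEpoll
#   end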
module Rainbows::StreamResponseEpoll
  # :stopdoc:
  autoload :Client, "rainbows/stream_response_epoll/client"

  def http_response_write(socket, status, headers, body)
    hijack = ep_client = false

    if headers
      # don't set extra headers here; this is only intended for
      # consumption by nginx.
      code = status.to_i
      msg = Rack::Utils::HTTP_STATUS_CODES[code]
      buf = "HTTP/1.0 #{msg ? %Q(#{code} #{msg}) : status}\r\n"
      headers.each do |key, value|
        case key
        when "rack.hijack"
          hijack = value
          body = nil # ensure we do not close body
        else
          if /\n/ =~ value
            # avoiding blank, key-only cookies with /\n+/
            value.split(/\n+/).each { |v| buf << "#{key}: #{v}\r\n" }
          else
            buf << "#{key}: #{value}\r\n"
          end
        end
      end
      buf << "X-Accel-Buffering: no\r\n\r\n".freeze

      case rv = socket.kgio_trywrite(buf)
      when nil then break
      when String # retry, socket buffer may grow
        buf = rv
      when :wait_writable
        ep_client = Client.new(socket, buf)
        if hijack
          ep_client.hijack(hijack)
        else
          body.each { |chunk| ep_client.write(chunk) }
          ep_client.close
        end
        # body is nil on hijack, in which case ep_client is never closed by us
        return
      end while true
    end

    if hijack
      hijack.call(socket)
      return
    end

    body.each do |chunk|
      if ep_client
        ep_client.write(chunk)
      else
        case rv = socket.kgio_trywrite(chunk)
        when nil then break
        when String # retry, socket buffer may grow
          chunk = rv
        when :wait_writable
          ep_client = Client.new(socket, chunk)
          break
        end while true
      end
    end
  ensure
    return if hijack
    body.respond_to?(:close) and body.close
    if ep_client
      ep_client.close
    else
      socket.shutdown
      socket.close
    end
  end

  # once a client is accepted, it is processed in its entirety here
  # in 3 easy steps: read request, call app, write app response
  def process_client(client)
    status, headers, body = @app.call(env = @request.read(client))

    if 100 == status.to_i
      client.write("HTTP/1.1 100 Continue\r\n\r\n".freeze)
      env.delete('HTTP_EXPECT'.freeze)
      status, headers, body = @app.call(env)
    end
    @request.headers? or headers = nil
    return if @request.hijacked?
    http_response_write(client, status, headers, body)
  rescue => e
    handle_error(client, e)
  end

  # :startdoc:
end
rainbows-5.0.0/lib/rainbows/worker_yield.rb0000644000004100000410000000117712641135250021013 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::WorkerYield

  # Sleep if we're busy (and let other threads run).  Another less busy
  # worker process may accept the connection for us while we sleep.  This is
  # gross, but
  # other options still suck because they require expensive/complicated
  # synchronization primitives for _every_ case, not just this unlikely
  # one.  Since this case is (or should be) uncommon, just busy wait
  # when we have to.  We don't use Thread.pass because it needlessly
  # spins the CPU during I/O wait, CPU cycles that can be better used by
  # other worker _processes_.
  def worker_yield
    sleep(0.01)
  end
end
rainbows-5.0.0/lib/rainbows/epoll.rb0000644000004100000410000000325712641135250017430 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'sleepy_penguin'
require 'sendfile'

# Edge-triggered epoll concurrency model using
# {sleepy_penguin}[http://bogomips.org/sleepy_penguin/] for epoll.
#
# Unlike more portable options like Coolio and EventMachine, this
# is Linux-only, but uses edge-triggering instead of level-triggering,
# so it may perform better in some cases.  Coolio and EventMachine have
# better library support and are more widely used, however.
#
# Consider using XEpoll instead of this if you are using Ruby 1.9,
# it will avoid accept()-scalability issues with many worker processes.
#
# When serving static files, this is extremely unfair and optimized
# for throughput at the expense of fairness.  This is not an issue
# if you're not serving static files, or if your working set is
# small enough to always be in your kernel page cache.  This concurrency
# model may starve clients if you have slow disks and large static files.
#
# Do not use this if you have slow external dependencies.
#
# === RubyGem Requirements
#
# * raindrops 0.6.0 or later
# * sleepy_penguin 3.0.1 or later
# * sendfile 1.1.0 or later
#
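# A minimal config sketch (the connection count shown is illustrative):
#
#   Rainbows! do
#     use :Epoll
#     worker_connections 400
#   end
#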
module Rainbows::Epoll
  # :stopdoc:
  include Rainbows::Base
  autoload :Server, 'rainbows/epoll/server'
  autoload :Client, 'rainbows/epoll/client'
  autoload :ResponsePipe, 'rainbows/epoll/response_pipe'
  autoload :ResponseChunkPipe, 'rainbows/epoll/response_chunk_pipe'

  def init_worker_process(worker)
    super
    Rainbows.const_set(:EP, SleepyPenguin::Epoll.new)
    Rainbows::Client.__send__ :include, Client
    LISTENERS.each { |io| io.extend(Server) }
  end

  def worker_loop(worker) # :nodoc:
    init_worker_process(worker)
    Client.loop
  end
  # :startdoc:
end
rainbows-5.0.0/lib/rainbows/writer_thread_pool.rb0000644000004100000410000000354712641135250022213 0ustar  www-datawww-data# -*- encoding: binary -*-

# This concurrency model implements a single-threaded app dispatch
# with a separate thread pool for writing responses.
#
# Unlike most \Rainbows! concurrency models, WriterThreadPool is
# designed to run behind nginx just like Unicorn is.  This concurrency
# model may be useful for existing Unicorn users looking for more
# output concurrency than socket buffers can provide while still
# maintaining a single-threaded application dispatch (though if the
# response body is dynamically generated, it must be thread safe).
#
# For serving large or streaming responses, using more threads (via
# the +worker_connections+ setting) and setting "proxy_buffering off"
# in nginx is recommended.  If your application does not handle
# uploads, then using any HTTP-aware proxy like haproxy is fine.
# Using a non-HTTP-aware proxy will leave you vulnerable to
# slow client denial-of-service attacks.
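#
# A config sketch (the connection count is illustrative; remember to
# disable proxy buffering in nginx separately for streaming responses):
#
#   Rainbows! do
#     use :WriterThreadPool
#     worker_connections 64
#   end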
module Rainbows::WriterThreadPool
  # :stopdoc:
  include Rainbows::Base
  autoload :Client, 'rainbows/writer_thread_pool/client'

  @@nr = 0
  @@q = nil

  def process_client(client) # :nodoc:
    @@nr += 1
    Client.new(client, @@q[@@nr %= @@q.size]).process_loop
  end

  def worker_loop(worker) # :nodoc:
    # we have multiple single-threaded queues since we don't want
    # writes to the same client to be interleaved across threads
    qp = (1..worker_connections).map do |n|
      Rainbows::QueuePool.new(1) do |response|
        begin
          io, arg, *rest = response
          case arg
          when String
            io.kgio_write(arg)
          when :close
            io.close unless io.closed?
          else
            io.__send__(arg, *rest)
          end
        rescue => err
          Rainbows::Error.write(io, err)
        end
      end
    end

    @@q = qp.map(&:queue)
    super(worker) # accept loop from Unicorn
    qp.each(&:quit!)
  end
  # :startdoc:
end
rainbows-5.0.0/lib/rainbows/base.rb0000644000004100000410000000356212641135250017226 0ustar  www-datawww-data# -*- encoding: binary -*-

# base class for \Rainbows! concurrency models, this is currently used by
# ThreadSpawn and ThreadPool models.  Base is also its own
# (non-)concurrency model which is basically Unicorn-with-keepalive, and
# not intended for production use, as keepalive with a pure prefork
# concurrency model is extremely expensive.
module Rainbows::Base
  # :stopdoc:

  def sig_receiver(worker)
    begin
      worker.to_io.kgio_wait_readable
      worker.kgio_tryaccept # Unicorn::Worker#kgio_tryaccept
    rescue => e
      Rainbows.alive or return
      Unicorn.log_error(Rainbows.server.logger, "signal receiver", e)
    end while true
  end

  # this method is called by all current concurrency models
  def init_worker_process(worker) # :nodoc:
    readers = super(worker)
    Rainbows::Response.setup
    Rainbows::MaxBody.setup
    Rainbows.worker = worker

    # spawn Threads since Logger takes a mutex by default and
    # we can't safely lock a mutex in a signal handler
    trap(:USR1) { Thread.new { reopen_worker_logs(worker.nr) } }
    trap(:QUIT) { Thread.new { Rainbows.quit! } }
    [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
    Rainbows::ProcessClient.const_set(:APP, Rainbows.server.app)
    Thread.new { sig_receiver(worker) }
    logger.info "Rainbows! #@use worker_connections=#@worker_connections"
    Rainbows.readers = readers # for Rainbows.quit
    readers # unicorn 4.8+ needs this
  end

  def process_client(client)
    client.process_loop
  end

  def self.included(klass) # :nodoc:
    klass.const_set :LISTENERS, Rainbows::HttpServer::LISTENERS
  end

  def reopen_worker_logs(worker_nr)
    logger.info "worker=#{worker_nr} reopening logs..."
    Unicorn::Util.reopen_logs
    logger.info "worker=#{worker_nr} done reopening logs"
    rescue
      Rainbows.quit! # let the master reopen and refork us
  end
  # :startdoc:
end
rainbows-5.0.0/lib/rainbows/thread_timeout.rb0000644000004100000410000001672312641135250021334 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'thread'

# Soft timeout middleware for thread-based concurrency models in \Rainbows!
# This timeout only includes application dispatch, and will not take into
# account the (rare) response bodies that are dynamically generated while
# they are being written out to the client.
#
# In your rackup config file (config.ru), the following line will
# cause execution to timeout in 1.5 seconds.
#
#    use Rainbows::ThreadTimeout, :timeout => 1.5
#    run MyApplication.new
#
# You may also specify a threshold, so the timeout does not take
# effect until there are enough active clients.  It does not make
# sense to set a +:threshold+ higher or equal to the
# +worker_connections+ \Rainbows! configuration parameter.
# You may specify a negative threshold to be an absolute
# value relative to the +worker_connections+ parameter, thus
# if you specify a threshold of -1, and have 100 worker_connections,
# ThreadTimeout will only activate when there are 99 active requests.
#
#    use Rainbows::ThreadTimeout, :timeout => 1.5, :threshold => -1
#    run MyApplication.new
#
# This middleware only affects elements below it in the stack, so
# it can be configured to ignore certain endpoints or middlewares.
#
# Timed-out requests will cause this middleware to return with a
# "408 Request Timeout" response.
#
# == Caveats
#
# Badly-written C extensions may not be timed out.  Audit and fix
# (or remove) those extensions before relying on this module.
#
# Do NOT, under any circumstances nest and load this in
# the same middleware stack.  You may load this in parallel in the
# same process completely independent middleware stacks, but DO NOT
# load this twice so it nests.  Things will break!
#
# This will behave badly if system time is changed since Ruby
# does not expose a monotonic clock for users, so don't change
# the system time while this is running.  All servers should be
# running ntpd anyways.
#
# "ensure" clauses may not fire properly or be interrupted during
# execution, so do not mix this module with code which relies on "ensure".
# (This is also true for the "Timeout" module in the Ruby standard library)
#
# "recursive locking" ThreadError exceptions may occur if
# ThreadTimeout fires while a Mutex is locked (because "ensure"
# clauses may not fire properly).

class Rainbows::ThreadTimeout

  # :stopdoc:
  #
  # we subclass Exception to get rid of normal StandardError rescues
  # in app-level code.  timeout.rb does something similar
  ExecutionExpired = Class.new(Exception)

  # MRI 1.8 won't be usable past January 2038, so we'll raise this
  # constant when we eventually drop support for 1.8 (before 2038, hopefully)
  NEVER = 0x7fffffff

  def initialize(app, opts)
    # @timeout must be Numeric since we add this to Time
    @timeout = opts[:timeout]
    Numeric === @timeout or
      raise TypeError, "timeout=#{@timeout.inspect} is not numeric"

    if @threshold = opts[:threshold]
      Integer === @threshold or
        raise TypeError, "threshold=#{@threshold.inspect} is not an integer"
      @threshold == 0 and raise ArgumentError, "threshold=0 does not make sense"
      @threshold < 0 and @threshold += Rainbows.server.worker_connections
    end
    @app = app

    # This is the main datastructure for communicating Threads eligible
    # for expiration to the watchdog thread.  If the eligible thread
    # completes its job before its expiration time, it will delete itself
    # from @active.  If the watchdog thread notices the thread has timed out,
    # the watchdog thread will delete the thread from this hash as it
    # raises the exception.
    #
    # key: Thread to be timed out
    # value: Time of expiration
    @active = {}

    # Protects all access to @active.  It is important since it also limits
    # safe points for asynchronously raising exceptions.
    @lock = Mutex.new

    # There is one long-running watchdog thread that watches @active and
    # kills threads that have been running too long
    # see start_watchdog
    @watchdog = nil
  end

  # entry point for Rack middleware
  def call(env)
    # Once we have this lock, we ensure two things:
    # 1) there is only one watchdog thread started
    # 2) we can't be killed once we have this lock, it's unlikely
    #    to happen unless @timeout is really low and the machine
    #    is very slow.
    @lock.lock

    # we're dead if anything in the next two lines raises, but it's
    # highly unlikely that they will, and anything such as NoMemoryError
    # is hopeless and we might as well just die anyways.
    # initialize guarantees @timeout will be Numeric
    start_watchdog(env) unless @watchdog
    @active[Thread.current] = Rainbows.now + @timeout

    begin
      # It is important to unlock inside this begin block
      # Mutex#unlock really can't fail here since we did a successful
      # Mutex#lock before
      @lock.unlock

      # Once the Mutex was unlocked, we're open to Thread#raise from
      # the watchdog process.  This is the main place we expect to receive
      # Thread#raise.  @app is of course the next layer of the Rack
      # application stack
      @app.call(env)
    ensure
      # It's still possible to receive a Thread#raise here from
      # the watchdog, but that's alright, the "rescue ExecutionExpired"
      # line will catch that.
      @lock.synchronize { @active.delete(Thread.current) }
      # Thread#raise no longer possible here
    end
    rescue ExecutionExpired
      # If we got here, it's because the watchdog thread raised an exception
      # here to kill us.  The watchdog uses @active.delete_if with a lock,
      # so we're guaranteed it already removed us from @active before raising.
      [ 408, { 'Content-Type' => 'text/plain', 'Content-Length' => '0' }, [] ]
  end

  # The watchdog thread is the one that does the job of killing threads
  # that have expired.
  def start_watchdog(env)
    @watchdog = Thread.new(env["rack.logger"]) do |logger|
      begin
        if @threshold
          # Hash#size is atomic in MRI 1.8 and 1.9 and we
          # expect that from other implementations.
          #
          # Even without a memory barrier, sleep(@timeout) vs
          # sleep(@timeout - time-for-SMP-to-synchronize-a-word)
          # is too trivial to worry about here.
          sleep(@timeout) while @active.size < @threshold
        end

        next_expiry = NEVER

        # We always lock access to @active, so we can't kill threads
        # that are about to release themselves from the eye of the
        # watchdog thread.
        @lock.synchronize do
          now = Rainbows.now
          @active.delete_if do |thread, expire_at|
            # We also use this loop to get the maximum possible time to
            # sleep for if we're not killing the thread.
            if expire_at > now
              next_expiry = expire_at if next_expiry > expire_at
              false
            else
              # Terminate execution and delete this thread from @active
              thread.raise(ExecutionExpired)
              true
            end
          end
        end

        # We always try to sleep as long as possible to avoid consuming
        # resources from the app.  So that's the user-configured @timeout
        # value.
        if next_expiry == NEVER
          sleep(@timeout)
        else
          # sleep until the next known thread is about to expire.
          sec = next_expiry - Rainbows.now
          sec > 0.0 ? sleep(sec) : Thread.pass # give other threads a chance
        end
      rescue => e
        # just in case
        logger.error e
      end while true # we run this forever
    end
  end
  # :startdoc:
end
rainbows-5.0.0/lib/rainbows/server_token.rb0000644000004100000410000000170312641135250021015 0ustar  www-datawww-data# -*- encoding: binary -*-
module Rainbows

# An optional middleware to proudly display your usage of \Rainbows! in
# the "Server:" response header.  This means you can help tell the world
# you're using \Rainbows! and spread fun and joy all over the Internet!
#
#    ------ in your config.ru ------
#    require 'rainbows/server_token'
#    require 'rack/lobster'
#    use Rainbows::ServerToken
#    run Rack::Lobster.new
#
# If you're nervous about the exact version of \Rainbows! you're running,
# then you can actually specify anything you want:
#
#    use Rainbows::ServerToken, "netcat 1.0"
#

class ServerToken < Struct.new(:app, :token)

  def initialize(app, token = Const::RACK_DEFAULTS['SERVER_SOFTWARE'])
    super
  end

  def call(env)
    status, headers, body = app.call(env)
    headers = Rack::Utils::HeaderHash.new(headers) unless Hash === headers
    headers['Server'.freeze] = token
    [ status, headers, body ]
  end
  # :startdoc:
end
end
rainbows-5.0.0/lib/rainbows/error.rb0000644000004100000410000000217012641135250017437 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::Error

  # if we get any error, try to write something back to the client
  # assuming we haven't closed the socket, but don't get hung up
  # if the socket is already closed or broken.  We'll always ensure
  # the socket is closed at the end of this function
  def self.write(io, e)
    msg = response(e) and Kgio.trywrite(io, msg)
    rescue
  end

  def self.app(e)
    Unicorn.log_error(Rainbows.server.logger, "app error", e)
    rescue
  end

  def self.listen_loop(e)
    Rainbows.alive or return
    Unicorn.log_error(Rainbows.server.logger, "listen loop error", e)
    rescue
  end

  def self.response(e)
    case e
    when EOFError, Errno::ECONNRESET, Errno::EPIPE, Errno::EINVAL,
         Errno::EBADF, Errno::ENOTCONN, Errno::ETIMEDOUT, Errno::EHOSTUNREACH
      # swallow error if client shuts down one end or disconnects
    when Unicorn::HttpParserError
      "HTTP/1.1 400 Bad Request\r\n\r\n" # try to tell the client they're bad
    when IOError # HttpParserError is an IOError
    else
      app(e)
      "HTTP/1.1 500 Internal Server Error\r\n\r\n"
    end
  end
end
rainbows-5.0.0/lib/rainbows/fiber_spawn.rb0000644000004100000410000000201712641135250020605 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'rainbows/fiber'

# Simple Fiber-based concurrency model for 1.9.  This spawns a new Fiber
# for every incoming client connection and the root Fiber for scheduling
# and connection acceptance.
#
# This concurrency model is difficult to use with existing applications,
# lacks third-party support, and is thus NOT recommended.
#
# This exports a streaming "rack.input" with lightweight concurrency.
# Applications are strongly advised to wrap all slow IO objects
# (sockets, pipes) using the Rainbows::Fiber::IO class whenever
# possible.
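#
# A bare-bones config sketch (the connection count is illustrative):
#
#   Rainbows! do
#     use :FiberSpawn
#     worker_connections 100
#   end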
module Rainbows::FiberSpawn
  include Rainbows::Fiber::Base

  def worker_loop(worker) # :nodoc:
    init_worker_process(worker)
    Rainbows::Fiber::Base.setup(self.class, app)
    limit = worker_connections

    begin
      schedule do |l|
        break if Rainbows.cur >= limit
        io = l.kgio_tryaccept or next
        Fiber.new { process(io) }.resume
      end
    rescue => e
      Rainbows::Error.listen_loop(e)
    end while Rainbows.cur_alive
  end
end
rainbows-5.0.0/lib/rainbows/sync_close.rb0000644000004100000410000000112612641135250020447 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
require 'thread'
class Rainbows::SyncClose
  def initialize(body)
    @body = body
    @mutex = Mutex.new
    @cv = ConditionVariable.new
    @mutex.synchronize do
      yield self
      @cv.wait(@mutex)
    end
  end

  def respond_to?(m)
    @body.respond_to?(m)
  end

  def to_path
    @body.to_path
  end

  def each
    @body.each { |x| yield x }
  end

  def to_io
    @body.to_io
  end

  # called by the writer thread to wake up the original thread (in #initialize)
  def close
    @body.close
  ensure
    @mutex.synchronize { @cv.signal }
  end
end
rainbows-5.0.0/lib/rainbows/thread_pool.rb0000644000004100000410000000414512641135250020612 0ustar  www-datawww-data# -*- encoding: binary -*-

# Implements a worker thread pool model.  This is suited for platforms
# like Ruby 1.9, where the cost of dynamically spawning a new thread for
# every new client connection is higher than with the ThreadSpawn model,
# but the cost of an idle thread is low (e.g. NPTL under Linux).
#
# This model should provide a high level of compatibility with all Ruby
# implementations, and most libraries and applications.  Applications
# running under this model should be thread-safe but not necessarily
# reentrant.
#
# Applications using this model are required to be thread-safe.  Threads
# are never spawned dynamically under this model.
#
# If you're using green threads (MRI 1.8) and need to perform DNS lookups,
# consider using the "resolv-replace" library which replaces parts of the
# core Socket package with concurrent DNS lookup capabilities.
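#
# A config sketch; here worker_connections doubles as the thread pool
# size (the number shown is illustrative):
#
#   Rainbows! do
#     use :ThreadPool
#     worker_connections 30
#   end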
module Rainbows::ThreadPool
  include Rainbows::Base

  def worker_loop(worker) # :nodoc:
    init_worker_process(worker)
    pool = (1..worker_connections).map do
      Thread.new { LISTENERS.size == 1 ? sync_worker : async_worker }
    end

    while Rainbows.alive
      # if any worker dies, something is seriously wrong; bail
      pool.each do |thr|
        Rainbows.tick or break
        thr.join(1) and Rainbows.quit!
      end
    end
    Rainbows::JoinThreads.acceptors(pool)
  end

  def sync_worker # :nodoc:
    s = LISTENERS[0]
    begin
      c = s.kgio_accept and c.process_loop
    rescue => e
      Rainbows::Error.listen_loop(e)
    end while Rainbows.alive
  end

  def async_worker # :nodoc:
    begin
      # TODO: check if select() or accept() is a problem on large
      # SMP systems under Ruby 1.9.  Hundreds of native threads
      # all working off the same socket could be a thundering herd
      # problem.  On the other hand, a thundering herd may not
      # even incur as much overhead as an extra Mutex#synchronize
      ret = select(LISTENERS) and ret[0].each do |s|
        s = s.kgio_tryaccept and s.process_loop
      end
    rescue Errno::EINTR
    rescue => e
      Rainbows::Error.listen_loop(e)
    end while Rainbows.alive
  end
end
rainbows-5.0.0/lib/rainbows/coolio_thread_pool.rb0000644000004100000410000000424612641135250022160 0ustar  www-datawww-data# -*- encoding: binary -*-

# A combination of the Coolio and ThreadPool models.  This allows Ruby
# Thread-based concurrency for application processing.  It DOES NOT
# expose a streamable "rack.input" for upload processing within the
# app.  DevFdResponse should be used with this class to proxy
# asynchronous responses.  All network I/O between the client and
# server are handled by the main thread and outside of the core
# application dispatch.
#
# Unlike ThreadPool, Cool.io makes this model highly suitable for
# slow clients and applications with medium-to-slow response times
# (I/O bound), but less suitable for sleepy applications.
#
# This concurrency model is designed for Ruby 1.9, and Ruby 1.8
# users are NOT advised to use this due to high CPU usage.
#
# === :pool_size vs worker_connections
#
# In your Rainbows! config block, you may specify a Thread pool size
# to limit your application concurrency independently of
# worker_connections.
#
#   Rainbows! do
#     use :CoolioThreadPool, :pool_size => 50
#     worker_connections 100
#   end
#
# In extremely rare cases, this may be combined with Rainbows::AppPool
# if you have different concurrency capabilities for different parts of
# your Rack application.
#
# === RubyGem Requirements
# * cool.io 1.0.0 or later
module Rainbows::CoolioThreadPool
  # :stopdoc:
  autoload :Client, 'rainbows/coolio_thread_pool/client'
  extend Rainbows::PoolSize
  #:startdoc:
  include Rainbows::Coolio::Core

  def init_worker_threads(master, queue) # :nodoc:
    Rainbows::O[:pool_size].times.map do
      Thread.new do
        begin
          client = queue.pop
          master << [ client, client.app_response ]
        rescue => e
          Rainbows::Error.listen_loop(e)
        end while true
      end
    end
  end

  def init_worker_process(worker) # :nodoc:
    super
    cloop = Coolio::Loop.default
    master = Rainbows::Coolio::Master.new(Queue.new).attach(cloop)
    queue = Client.const_set(:QUEUE, Queue.new)
    threads = init_worker_threads(master, queue)
    Watcher.new(threads).attach(cloop)
    logger.info "CoolioThreadPool pool_size=#{Rainbows::O[:pool_size]}"
  end
end
# :enddoc:
require 'rainbows/coolio_thread_pool/watcher'
rainbows-5.0.0/lib/rainbows/event_machine/0000755000004100000410000000000012641135250020566 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/event_machine/response_chunk_pipe.rb0000644000004100000410000000074312641135250025162 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::EventMachine::ResponseChunkPipe
  include Rainbows::EventMachine::ResponsePipe

  def unbind
    @client.write("0\r\n\r\n")
    super
  end

  def notify_readable
    case data = Kgio.tryread(@io, 16384, RBUF)
    when String
      @client.write("#{data.size.to_s(16)}\r\n")
      @client.write(data)
      @client.write("\r\n")
    when :wait_readable
      return
    when nil
      return detach
    end while true
  end
end
rainbows-5.0.0/lib/rainbows/event_machine/try_defer.rb0000644000004100000410000000174412641135250023104 0ustar  www-datawww-data# -*- encoding: binary -*-

# Middleware that will run the app dispatch in a separate thread.
# This middleware is automatically loaded by Rainbows! when using
# EventMachine and if the app responds to the +deferred?+ method.
#
# Use EM.threadpool_size in your \Rainbows! config file to control
# the number of threads used by EventMachine.
#
# See http://brainspl.at/articles/2008/04/18/deferred-requests-with-merb-ebb-and-thin
# for more information.
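#
# A sketch of an application opting into deferral (MySlowApp and the
# path check are hypothetical):
#
#   class MySlowApp
#     def deferred?(env)
#       env['PATH_INFO'] == '/slow' # only defer the slow endpoint
#     end
#
#     def call(env)
#       sleep 2 # simulated slow work
#       [ 200, { 'Content-Type' => 'text/plain' }, [ "done\n" ] ]
#     end
#   end
#
#   # in config.ru:
#   run MySlowApp.new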
class Rainbows::EventMachine::TryDefer
  def initialize(app) # :nodoc:
    # the entire app becomes multithreaded, even the root (non-deferred)
    # thread, since any thread shares the process with the others
    Rainbows::Const::RACK_DEFAULTS['rack.multithread'] = true
    @app = app
  end

  def call(env) # :nodoc:
    if @app.deferred?(env)
      EM.defer(proc { catch(:async) { @app.call(env) } }, env['async.callback'])
      # all of the async/deferred stuff breaks Rack::Lint :<
      nil
    else
      @app.call(env)
    end
  end
end
rainbows-5.0.0/lib/rainbows/event_machine/client.rb0000644000004100000410000000721312641135250022374 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::EventMachine::Client < EM::Connection
  include Rainbows::EvCore
  Rainbows.config!(self, :keepalive_timeout)

  def initialize(io)
    @_io = io
    @deferred = nil
  end

  alias write send_data
  alias hijacked detach

  def receive_data(data)
    # To avoid clobbering the current streaming response
    # (often a static file), we do not attempt to process another
    # request on the same connection until the first is complete
    if @deferred
      if data
        @buf << data
        @_io.shutdown(Socket::SHUT_RD) if @buf.size > 0x1c000
      end
      EM.next_tick { receive_data(nil) } unless @buf.empty?
    else
      on_read(data || ''.freeze) if (@buf.size > 0) || data
    end
  end

  def quit
    super
    close_connection_after_writing if nil == @deferred
  end

  def app_call input
    set_comm_inactivity_timeout 0
    @env['rack.input'] = input
    @env['REMOTE_ADDR'] = @_io.kgio_addr
    @env['async.callback'] = method(:write_async_response)
    @env['async.close'] = EM::DefaultDeferrable.new
    @hp.hijack_setup(@_io)
    status, headers, body = catch(:async) {
      APP.call(@env.merge!(RACK_DEFAULTS))
    }
    return hijacked if @hp.hijacked?

    if (nil == status || -1 == status)
      @deferred = true
    else
      ev_write_response(status, headers, body, @hp.next?)
    end
  end

  def deferred_errback(orig_body)
    @deferred.errback do
      orig_body.close if orig_body.respond_to?(:close)
      @deferred = nil
      quit
    end
  end

  def deferred_callback(orig_body, alive)
    @deferred.callback do
      orig_body.close if orig_body.respond_to?(:close)
      @deferred = nil
      alive ? receive_data(nil) : quit
    end
  end

  def ev_write_response(status, headers, body, alive)
    @state = :headers if alive
    if body.respond_to?(:errback) && body.respond_to?(:callback)
      write_headers(status, headers, alive, body) or return hijacked
      @deferred = body
      write_body_each(body)
      deferred_errback(body)
      deferred_callback(body, alive)
      return
    elsif body.respond_to?(:to_path)
      st = File.stat(path = body.to_path)

      if st.file?
        write_headers(status, headers, alive, body) or return hijacked
        @deferred = stream_file_data(path)
        deferred_errback(body)
        deferred_callback(body, alive)
        return
      elsif st.socket? || st.pipe?
        chunk = stream_response_headers(status, headers, alive, body)
        return hijacked if nil == chunk
        io = body_to_io(@deferred = body)
        m = chunk ? Rainbows::EventMachine::ResponseChunkPipe :
                    Rainbows::EventMachine::ResponsePipe
        return EM.watch(io, m, self).notify_readable = true
      end
      # char or block device... WTF? fall through to body.each
    end
    write_response(status, headers, body, alive) or return hijacked
    if alive
      if @deferred.nil?
        if @buf.empty?
          set_comm_inactivity_timeout(KEEPALIVE_TIMEOUT)
        else
          EM.next_tick { receive_data(nil) }
        end
      end
    else
      quit unless @deferred
    end
  end

  def next!
    @deferred.close if @deferred.respond_to?(:close)
    @deferred = nil
    @hp.keepalive? ? receive_data(nil) : quit
  end

  def unbind
    return if @hp.hijacked?
    async_close = @env['async.close'] and async_close.succeed
    @deferred.respond_to?(:fail) and @deferred.fail
    begin
      @_io.close
    rescue Errno::EBADF
      # EventMachine's EventableDescriptor::Close() may close
      # the underlying file descriptor without invalidating the
      # associated IO object on errors, so @_io.closed? isn't
      # sufficient.
    end
  end
end
rainbows-5.0.0/lib/rainbows/event_machine/response_pipe.rb0000644000004100000410000000107012641135250023764 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::EventMachine::ResponsePipe
  # garbage avoidance, EM always uses this in a single thread,
  # so a single buffer for all clients will work safely
  RBUF = Rainbows::EvCore::RBUF

  def initialize(client)
    @client = client
  end

  def notify_readable
    case data = Kgio.tryread(@io, 16384, RBUF)
    when String
      @client.write(data)
    when :wait_readable
      return
    when nil
      return detach
    end while true
  end

  def unbind
    @client.next!
    @io.close unless @io.closed?
  end
end
rainbows-5.0.0/lib/rainbows/event_machine/server.rb0000644000004100000410000000053712641135250022426 0ustar  www-datawww-data# -*- encoding: binary -*-
module Rainbows::EventMachine::Server # :nodoc: all
  def close
    detach
    @io.close
  end

  # CL, CUR and MAX will be set when worker_loop starts
  def notify_readable
    return if CUR.size >= MAX
    io = @io.kgio_tryaccept or return
    sig = EM.attach_fd(io.fileno, false)
    CUR[sig] = CL.new(sig, io)
  end
end
rainbows-5.0.0/lib/rainbows/xepoll.rb0000644000004100000410000000156212641135250017615 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'raindrops'
require 'rainbows/epoll'

# Edge-triggered epoll concurrency model with blocking accept() in a
# (hopefully) native thread.  This is just like Epoll, but recommended
# for Ruby 1.9 users as it can avoid accept()-scalability issues on
# multicore machines with many worker processes.
#
# Do not use this if you have slow external dependencies.
#
# === RubyGem Requirements
#
# * raindrops 0.6.0 or later
# * sleepy_penguin 3.0.1 or later
# * sendfile 1.1.0 or later
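#
# Switching from Epoll is a one-line change in the config block
# (sketch; the connection count is illustrative):
#
#   Rainbows! do
#     use :XEpoll
#     worker_connections 400
#   end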
module Rainbows::XEpoll
  # :stopdoc:
  include Rainbows::Base
  autoload :Client, 'rainbows/xepoll/client'

  def init_worker_process(worker)
    super
    Rainbows.const_set(:EP, SleepyPenguin::Epoll.new)
    Rainbows::Client.__send__ :include, Client
  end

  def worker_loop(worker) # :nodoc:
    init_worker_process(worker)
    Client.loop
  end
  # :startdoc:
end
rainbows-5.0.0/lib/rainbows/xepoll/0000755000004100000410000000000012641135250017264 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/xepoll/client.rb0000644000004100000410000000226212641135250021071 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:

module Rainbows::XEpoll::Client
  N = Raindrops.new(1)
  include Rainbows::Epoll::Client
  ACCEPTORS = Rainbows::HttpServer::LISTENERS.dup
  extend Rainbows::WorkerYield

  def self.included(klass)
    max = Rainbows.server.worker_connections
    ACCEPTORS.map! do |sock|
      Thread.new do
        begin
          if io = sock.kgio_accept(klass)
            N.incr(0, 1)
            io.epoll_once
          end
          worker_yield while N[0] >= max
        rescue => e
          Rainbows::Error.listen_loop(e)
        end while Rainbows.alive
      end
    end
  end

  def self.loop
    begin
      EP.wait(nil, 1000) { |_, obj| obj.epoll_run }
      Rainbows::Epoll::Client.expire
    rescue Errno::EINTR
    rescue => e
      Rainbows::Error.listen_loop(e)
    end while Rainbows.tick || N[0] > 0
    Rainbows::JoinThreads.acceptors(ACCEPTORS)
  end

  # only call this once
  def epoll_once
    @wr_queue = [] # may contain String, ResponsePipe, and StreamFile objects
    post_init
    EP.set(self, IN) # wake up the main thread
    rescue => e
      Rainbows::Error.write(self, e)
  end

  def on_close
    KATO.delete(self)
    N.decr(0, 1)
  end
end
rainbows-5.0.0/lib/rainbows/revactor/0000755000004100000410000000000012641135250017606 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/revactor/proxy.rb0000644000004100000410000000245612641135250021323 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
# Generic IO wrapper for proxying pipe and socket objects.  This behaves
# more like Rainbows::Fiber::IO than anything else, making it highly
# suitable for proxying data from pipes/sockets.
class Rainbows::Revactor::Proxy < Rev::IO
  def initialize(io)
    @receiver = Actor.current
    super(io)
    attach(Rev::Loop.default)
  end

  def close
    if @_io
      super
      @_io = nil
    end
  end

  def each
    # when yield-ing, Revactor::TCP#write may raise EOFError
    # (instead of Errno::EPIPE), so we need to limit the rescue
    # to just readpartial and let EOFErrors during yield bubble up
    begin
      buf = readpartial(16384)
    rescue EOFError
      break
    end while yield(buf) || true
  end

  # this may return more than the specified length, Rainbows! won't care...
  def readpartial(length)
    @receiver = Actor.current
    enable if attached? && ! enabled?

    Actor.receive do |filter|
      filter.when(T[:rainbows_io_input, self]) do |_, _, data|
        return data
      end

      filter.when(T[:rainbows_io_closed, self]) do
        raise EOFError, "connection closed"
      end
    end
  end

  def on_close
    @receiver << T[:rainbows_io_closed, self]
  end

  def on_read(data)
    @receiver << T[:rainbows_io_input, self, data ]
    disable
  end
end
rainbows-5.0.0/lib/rainbows/revactor/client.rb0000644000004100000410000000233612641135250021415 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
class Rainbows::Revactor::Client
  autoload :TeeSocket, 'rainbows/revactor/client/tee_socket'
  RD_ARGS = {}
  Rainbows.server.keepalive_timeout > 0 and
    RD_ARGS[:timeout] = Rainbows.server.keepalive_timeout
  attr_reader :kgio_addr

  def initialize(client)
    @client, @rd_args, @ts = client, [ nil ], nil
    io = client.instance_variable_get(:@_io)
    io.close_on_exec = true
    @kgio_addr = if Revactor::TCP::Socket === client
      @rd_args << RD_ARGS
      client.remote_addr
    else
      Kgio::LOCALHOST
    end
  end

  def kgio_read!(nr, buf)
    buf.replace(@client.read)
  end

  def write(buf)
    @client.write(buf)
  end

  def timed_read(buf2)
    buf2.replace(@client.read(*@rd_args))
  end

  def set_input(env, hp)
    env['rack.input'] = 0 == hp.content_length ?
                      NULL_IO : IC.new(@ts = TeeSocket.new(@client), hp)
  end

  def to_io
    @client.instance_variable_get(:@_io)
  end

  def close
    @client.close
    @client = nil
  end

  def closed?
    @client.nil?
  end

  def self.setup
    self.const_set(:IC, Unicorn::HttpRequest.input_class)
    include Rainbows::ProcessClient
    include Methods
  end
end
require 'rainbows/revactor/client/methods'
rainbows-5.0.0/lib/rainbows/revactor/client/0000755000004100000410000000000012641135250021064 5ustar  www-datawww-datarainbows-5.0.0/lib/rainbows/revactor/client/tee_socket.rb0000644000004100000410000000242512641135250023541 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
#
# Revactor Sockets do not implement readpartial, so we emulate just
# enough to avoid mucking with TeeInput internals.  Fortunately
# this code is not heavily used so we can usually avoid the overhead
# of adding a userspace buffer.
class Rainbows::Revactor::Client::TeeSocket
  def initialize(socket)
    # IO::Buffer is used internally by Rev which Revactor is based on
    # so we'll always have it available
    @socket, @rbuf = socket, IO::Buffer.new
  end

  def leftover
    @rbuf.read
  end

  # Revactor socket reads always return an unspecified amount,
  # sometimes too much
  def kgio_read(length, dst = "")
    return dst.replace("") if length == 0

    # always check and return from the userspace buffer first
    @rbuf.size > 0 and return dst.replace(@rbuf.read(length))

    # read off the socket since there was nothing in rbuf
    tmp = @socket.read

    # we didn't read too much, good, just return it straight back
    # to avoid needlessly wasting memory bandwidth
    tmp.size <= length and return dst.replace(tmp)

    # ugh, read returned too much
    @rbuf << tmp[length, tmp.size]
    dst.replace(tmp[0, length])
    rescue EOFError
  end

  # just proxy any remaining methods TeeInput may use
  def close
    @socket.close
  end
end
rainbows-5.0.0/lib/rainbows/revactor/client/methods.rb0000644000004100000410000000271112641135250023055 0ustar  www-datawww-data# -*- encoding: binary -*-
# :enddoc:
module Rainbows::Revactor::Client::Methods
  if IO.method_defined?(:trysendfile)
    def write_body_file(body, range)
      body, client = body_to_io(body), @client
      sock = @client.instance_variable_get(:@_io)
      pfx = Revactor::TCP::Socket === client ? :tcp : :unix
      write_complete = T[:"#{pfx}_write_complete", client]
      closed = T[:"#{pfx}_closed", client]
      offset, count = range ? range : [ 0, body.stat.size ]
      case n = sock.trysendfile(body, offset, count)
      when Integer
        offset += n
        return if 0 == (count -= n)
      when :wait_writable
        # The @_write_buffer is empty at this point, trigger the
        # on_readable method which in turn triggers on_write_complete
        # even though nothing was written
        client.controller = Actor.current
        client.__send__(:enable_write_watcher)
        Actor.receive do |filter|
          filter.when(write_complete) {}
          filter.when(closed) { raise Errno::EPIPE }
        end
      else # nil
        return
      end while true
      ensure
        close_if_private(body)
    end
  end

  def handle_error(e)
    Revactor::TCP::ReadError === e or super
  end

  def write_response(status, headers, body, alive)
    super(status, headers, body, alive) or return
    alive && @ts and @hp.buf << @ts.leftover
  end

  def self.included(klass)
    klass.__send__ :alias_method, :write_body_stream, :write_body_each
  end
end
rainbows-5.0.0/lib/rainbows.rb0000644000004100000410000001136512641135250016314 0ustar  www-datawww-data# -*- encoding: binary -*-
require 'kgio'
require 'unicorn'
# the value passed to TCP_DEFER_ACCEPT actually matters in Linux 2.6.32+
Unicorn::SocketHelper::DEFAULTS[:tcp_defer_accept] = 60

# See http://rainbows.bogomips.org/ for documentation
module Rainbows
  # :stopdoc:
  O = {}

  # map of numeric file descriptors to IO objects to avoid using IO.new
  # and potentially causing race conditions when using /dev/fd/
  FD_MAP = {}.compare_by_identity

  require 'rainbows/const'
  require 'rainbows/http_parser'
  require 'rainbows/http_server'
  autoload :Response, 'rainbows/response'
  autoload :ProcessClient, 'rainbows/process_client'
  autoload :Client, 'rainbows/client'
  autoload :Base, 'rainbows/base'
  autoload :Sendfile, 'rainbows/sendfile'
  autoload :AppPool, 'rainbows/app_pool'
  autoload :DevFdResponse, 'rainbows/dev_fd_response'
  autoload :MaxBody, 'rainbows/max_body'
  autoload :QueuePool, 'rainbows/queue_pool'
  autoload :EvCore, 'rainbows/ev_core'
  autoload :SocketProxy, 'rainbows/socket_proxy'

  # :startdoc:
  # Sleeps the current application dispatch.  This will pick the
  # optimal method to sleep depending on the concurrency model chosen
  # (which may still suck and block the entire process).  Using this
  # with the basic :Coolio or :EventMachine models is not recommended.
  # This should be used within your Rack application.
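  #
  # A minimal sketch of calling this from a rackup file (the endpoint
  # below is purely illustrative and not part of Rainbows! itself):
  #
  #   run lambda { |env|
  #     Rainbows.sleep(1) # delay this client without starving others
  #     [ 200, { 'Content-Type' => 'text/plain' }, [ "done\n" ] ]
  #   }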
  def self.sleep(seconds)
    case Rainbows.server.use
    when :FiberPool, :FiberSpawn
      Rainbows::Fiber.sleep(seconds)
    when :RevFiberSpawn, :CoolioFiberSpawn
      Rainbows::Fiber::Coolio::Sleeper.new(seconds)
    when :Revactor
      Actor.sleep(seconds)
    else
      Kernel.sleep(seconds)
    end
  end
  # :stopdoc:

  class << self
    attr_accessor :server
    attr_accessor :cur # may not always be used
    attr_reader :alive
    attr_writer :worker
    attr_writer :forked
    attr_writer :readers
  end

  def self.config!(mod, *opts)
    @forked or abort "#{mod} should only be loaded in a worker process"
    opts.each do |opt|
      mod.const_set(opt.to_s.upcase, Rainbows.server.__send__(opt))
    end
  end

  @alive = true
  @cur = 0
  @expire = nil
  @at_quit = []

  def self.at_quit(&block)
    @at_quit << block
  end

  def self.tick
    @worker.tick = now.to_i
    exit!(2) if @expire && now >= @expire
    @alive && @server.master_pid == Process.ppid or quit!
  end

  def self.cur_alive
    @alive || @cur > 0
  end

  def self.quit!
    unless @expire
      @alive = false
      Rainbows::HttpParser.quit
      @expire = now + (@server.timeout * 2.0)
      tmp = @readers.dup
      @readers.clear
      tmp.each { |s| s.close rescue nil }.clear
      @at_quit.each(&:call)

      # XXX hack to break out of IO.select in worker_loop for some models
      Process.kill(:QUIT, $$)
    end
    false
  end

  # try to use the monotonic clock in Ruby >= 2.1, it is immune to clock
  # offset adjustments and generates less garbage (Float vs Time object)
  begin
    Process.clock_gettime(Process::CLOCK_MONOTONIC)
    def self.now
      Process.clock_gettime(Process::CLOCK_MONOTONIC)
    end
  rescue NameError, NoMethodError
    def self.now # Ruby <= 2.0
      Time.now.to_f
    end
  end

  autoload :Base, "rainbows/base"
  autoload :WriterThreadPool, "rainbows/writer_thread_pool"
  autoload :WriterThreadSpawn, "rainbows/writer_thread_spawn"
  autoload :Revactor, "rainbows/revactor"
  autoload :ThreadSpawn, "rainbows/thread_spawn"
  autoload :ThreadPool, "rainbows/thread_pool"
  autoload :Rev, "rainbows/rev"
  autoload :RevThreadSpawn, "rainbows/rev_thread_spawn"
  autoload :RevThreadPool, "rainbows/rev_thread_pool"
  autoload :RevFiberSpawn, "rainbows/rev_fiber_spawn"
  autoload :Coolio, "rainbows/coolio"
  autoload :CoolioThreadSpawn, "rainbows/coolio_thread_spawn"
  autoload :CoolioThreadPool, "rainbows/coolio_thread_pool"
  autoload :CoolioFiberSpawn, "rainbows/coolio_fiber_spawn"
  autoload :Epoll, "rainbows/epoll"
  autoload :XEpoll, "rainbows/xepoll"
  autoload :EventMachine, "rainbows/event_machine"
  autoload :FiberSpawn, "rainbows/fiber_spawn"
  autoload :FiberPool, "rainbows/fiber_pool"
  autoload :ActorSpawn, "rainbows/actor_spawn"
  autoload :NeverBlock, "rainbows/never_block"
  autoload :XEpollThreadSpawn, "rainbows/xepoll_thread_spawn"
  autoload :XEpollThreadPool, "rainbows/xepoll_thread_pool"
  autoload :StreamResponseEpoll, "rainbows/stream_response_epoll"

  autoload :Fiber, 'rainbows/fiber' # core class
  autoload :StreamFile, 'rainbows/stream_file'
  autoload :ThreadTimeout, 'rainbows/thread_timeout'
  autoload :WorkerYield, 'rainbows/worker_yield'
  autoload :SyncClose, 'rainbows/sync_close'
  autoload :ReverseProxy, 'rainbows/reverse_proxy'
  autoload :JoinThreads, 'rainbows/join_threads'
  autoload :PoolSize, 'rainbows/pool_size'
end

require 'rainbows/error'
require 'rainbows/configurator'
rainbows-5.0.0/.manifest0000644000004100000410000001672112641135250015205 0ustar  www-datawww-data.document
.gitattributes
.gitignore
.manifest
.olddoc.yml
COPYING
DEPLOY
Documentation/.gitignore
Documentation/GNUmakefile
Documentation/comparison.haml
Documentation/rainbows.1.txt
FAQ
GIT-VERSION-FILE
GIT-VERSION-GEN
GNUmakefile
HACKING
LATEST
LICENSE
NEWS
README
SIGNALS
Sandbox
Static_Files
TODO
TUNING
Test_Suite
archive/.gitignore
archive/rfmig.rb
archive/slrnpull.conf
bin/rainbows
examples/reverse_proxy.ru
lib/rainbows.rb
lib/rainbows/actor_spawn.rb
lib/rainbows/app_pool.rb
lib/rainbows/base.rb
lib/rainbows/client.rb
lib/rainbows/configurator.rb
lib/rainbows/const.rb
lib/rainbows/coolio.rb
lib/rainbows/coolio/client.rb
lib/rainbows/coolio/core.rb
lib/rainbows/coolio/heartbeat.rb
lib/rainbows/coolio/master.rb
lib/rainbows/coolio/response_chunk_pipe.rb
lib/rainbows/coolio/response_pipe.rb
lib/rainbows/coolio/server.rb
lib/rainbows/coolio/thread_client.rb
lib/rainbows/coolio_fiber_spawn.rb
lib/rainbows/coolio_support.rb
lib/rainbows/coolio_thread_pool.rb
lib/rainbows/coolio_thread_pool/client.rb
lib/rainbows/coolio_thread_pool/watcher.rb
lib/rainbows/coolio_thread_spawn.rb
lib/rainbows/coolio_thread_spawn/client.rb
lib/rainbows/dev_fd_response.rb
lib/rainbows/epoll.rb
lib/rainbows/epoll/client.rb
lib/rainbows/epoll/response_chunk_pipe.rb
lib/rainbows/epoll/response_pipe.rb
lib/rainbows/epoll/server.rb
lib/rainbows/error.rb
lib/rainbows/ev_core.rb
lib/rainbows/ev_core/cap_input.rb
lib/rainbows/event_machine.rb
lib/rainbows/event_machine/client.rb
lib/rainbows/event_machine/response_chunk_pipe.rb
lib/rainbows/event_machine/response_pipe.rb
lib/rainbows/event_machine/server.rb
lib/rainbows/event_machine/try_defer.rb
lib/rainbows/fiber.rb
lib/rainbows/fiber/base.rb
lib/rainbows/fiber/body.rb
lib/rainbows/fiber/coolio.rb
lib/rainbows/fiber/coolio/heartbeat.rb
lib/rainbows/fiber/coolio/methods.rb
lib/rainbows/fiber/coolio/server.rb
lib/rainbows/fiber/coolio/sleeper.rb
lib/rainbows/fiber/io.rb
lib/rainbows/fiber/io/compat.rb
lib/rainbows/fiber/io/methods.rb
lib/rainbows/fiber/io/pipe.rb
lib/rainbows/fiber/io/socket.rb
lib/rainbows/fiber/queue.rb
lib/rainbows/fiber_pool.rb
lib/rainbows/fiber_spawn.rb
lib/rainbows/http_parser.rb
lib/rainbows/http_server.rb
lib/rainbows/join_threads.rb
lib/rainbows/max_body.rb
lib/rainbows/max_body/rewindable_wrapper.rb
lib/rainbows/max_body/wrapper.rb
lib/rainbows/never_block.rb
lib/rainbows/never_block/core.rb
lib/rainbows/never_block/event_machine.rb
lib/rainbows/pool_size.rb
lib/rainbows/process_client.rb
lib/rainbows/queue_pool.rb
lib/rainbows/response.rb
lib/rainbows/rev.rb
lib/rainbows/rev_fiber_spawn.rb
lib/rainbows/rev_thread_pool.rb
lib/rainbows/rev_thread_spawn.rb
lib/rainbows/revactor.rb
lib/rainbows/revactor/client.rb
lib/rainbows/revactor/client/methods.rb
lib/rainbows/revactor/client/tee_socket.rb
lib/rainbows/revactor/proxy.rb
lib/rainbows/reverse_proxy.rb
lib/rainbows/reverse_proxy/coolio.rb
lib/rainbows/reverse_proxy/ev_client.rb
lib/rainbows/reverse_proxy/event_machine.rb
lib/rainbows/reverse_proxy/multi_thread.rb
lib/rainbows/reverse_proxy/synchronous.rb
lib/rainbows/sendfile.rb
lib/rainbows/server_token.rb
lib/rainbows/socket_proxy.rb
lib/rainbows/stream_file.rb
lib/rainbows/stream_response_epoll.rb
lib/rainbows/stream_response_epoll/client.rb
lib/rainbows/sync_close.rb
lib/rainbows/thread_pool.rb
lib/rainbows/thread_spawn.rb
lib/rainbows/thread_timeout.rb
lib/rainbows/version.rb
lib/rainbows/worker_yield.rb
lib/rainbows/writer_thread_pool.rb
lib/rainbows/writer_thread_pool/client.rb
lib/rainbows/writer_thread_spawn.rb
lib/rainbows/writer_thread_spawn/client.rb
lib/rainbows/xepoll.rb
lib/rainbows/xepoll/client.rb
lib/rainbows/xepoll_thread_pool.rb
lib/rainbows/xepoll_thread_pool/client.rb
lib/rainbows/xepoll_thread_spawn.rb
lib/rainbows/xepoll_thread_spawn/client.rb
local.mk.sample
man/man1/rainbows.1
pkg.mk
rainbows.gemspec
setup.rb
t/.gitignore
t/GNUmakefile
t/README
t/app_deferred.ru
t/async-response-no-autochunk.ru
t/async-response.ru
t/async_chunk_app.ru
t/async_examples/README
t/async_examples/async_app.ru
t/async_examples/async_tailer.ru
t/async_sinatra.ru
t/bin/content-md5-put
t/bin/sha1sum.rb
t/bin/unused_listen
t/byte-range-common.sh
t/client_header_buffer_size.ru
t/close-has-env.ru
t/close-pipe-response.ru
t/close-pipe-to_path-response.ru
t/content-md5.ru
t/cramp/README
t/cramp/rainsocket.ru
t/cramp/streaming.ru
t/env.ru
t/env_rack_env.ru
t/fast-pipe-response.ru
t/file-wrap-to_path.ru
t/fork-sleep.ru
t/heartbeat-timeout.ru
t/hijack.ru
t/kgio-pipe-response.ru
t/large-file-response.ru
t/my-tap-lib.sh
t/rack-fiber_pool/app.ru
t/sha1-random-size.ru
t/sha1.ru
t/simple-http_ActorSpawn.ru
t/simple-http_Base.ru
t/simple-http_Coolio.ru
t/simple-http_CoolioFiberSpawn.ru
t/simple-http_CoolioThreadPool.ru
t/simple-http_CoolioThreadSpawn.ru
t/simple-http_Epoll.ru
t/simple-http_EventMachine.ru
t/simple-http_FiberPool.ru
t/simple-http_FiberSpawn.ru
t/simple-http_NeverBlock.ru
t/simple-http_Rev.ru
t/simple-http_RevFiberSpawn.ru
t/simple-http_RevThreadPool.ru
t/simple-http_RevThreadSpawn.ru
t/simple-http_Revactor.ru
t/simple-http_ThreadPool.ru
t/simple-http_ThreadSpawn.ru
t/simple-http_WriterThreadPool.ru
t/simple-http_WriterThreadSpawn.ru
t/simple-http_XEpoll.ru
t/simple-http_XEpollThreadPool.ru
t/simple-http_XEpollThreadSpawn.ru
t/sleep.ru
t/t0000-simple-http.sh
t/t0000.ru
t/t0001-unix-http.sh
t/t0002-graceful.sh
t/t0002-parser-error.sh
t/t0003-reopen-logs.sh
t/t0004-heartbeat-timeout.sh
t/t0005-large-file-response.sh
t/t0006-process-rack-env.sh
t/t0007-worker-follows-master-to-death.sh
t/t0008-ensure-usable-after-limit.sh
t/t0009-broken-app.sh
t/t0009.ru
t/t0010-keepalive-timeout-effective.sh
t/t0011-close-on-exec-set.sh
t/t0012-spurious-wakeups-quiet.sh
t/t0013-reload-bad-config.sh
t/t0014-config-conflict.sh
t/t0015-working_directory.sh
t/t0016-onenine-encoding-is-tricky.sh
t/t0016.rb
t/t0017-keepalive-timeout-zero.sh
t/t0018-reload-restore-settings.sh
t/t0019-keepalive-cpu-usage.sh
t/t0020-large-sendfile-response.sh
t/t0021-sendfile-wrap-to_path.sh
t/t0022-copy_stream-byte-range.sh
t/t0023-sendfile-byte-range.sh
t/t0024-pipelined-sendfile-response.sh
t/t0025-write-on-close.sh
t/t0026-splice-copy_stream-byte-range.sh
t/t0027-nil-copy_stream.sh
t/t0030-fast-pipe-response.sh
t/t0031-close-pipe-response.sh
t/t0032-close-pipe-to_path-response.sh
t/t0034-pipelined-pipe-response.sh
t/t0035-kgio-pipe-response.sh
t/t0040-keepalive_requests-setting.sh
t/t0041-optional-pool-size.sh
t/t0042-client_header_buffer_size.sh
t/t0043-quit-keepalive-disconnect.sh
t/t0045-client_max_header_size.sh
t/t0050-response-body-close-has-env.sh
t/t0100-rack-input-hammer-chunked.sh
t/t0100-rack-input-hammer-content-length.sh
t/t0101-rack-input-trailer.sh
t/t0102-rack-input-short.sh
t/t0103-rack-input-limit.sh
t/t0104-rack-input-limit-tiny.sh
t/t0105-rack-input-limit-bigger.sh
t/t0106-rack-input-keepalive.sh
t/t0107-rack-input-limit-zero.sh
t/t0113-rewindable-input-false.sh
t/t0113.ru
t/t0114-rewindable-input-true.sh
t/t0114.ru
t/t0200-async-response.sh
t/t0201-async-response-no-autochunk.sh
t/t0202-async-response-one-oh.sh
t/t0300-async_sinatra.sh
t/t0400-em-async-app.sh
t/t0401-em-async-tailer.sh
t/t0402-async-keepalive.sh
t/t0500-cramp-streaming.sh
t/t0501-cramp-rainsocket.sh
t/t0600-rack-fiber_pool.sh
t/t0700-app-deferred.sh
t/t0800-rack-hijack.sh
t/t9000-rack-app-pool.sh
t/t9000.ru
t/t9001-sendfile-to-path.sh
t/t9001.ru
t/t9002-server-token.sh
t/t9002.ru
t/t9100-thread-timeout.sh
t/t9100.ru
t/t9101-thread-timeout-threshold.sh
t/t9101.ru
t/test-lib.sh
t/test_isolate.rb
t/test_isolate_cramp.rb
t/times.ru
t/worker-follows-master-to-death.ru
t/write-on-close.ru
vs_Unicorn
rainbows-5.0.0/metadata.yml0000644000004100000410000003002012641135250015665 0ustar  www-datawww-data--- !ruby/object:Gem::Specification
name: rainbows
version: !ruby/object:Gem::Version
  version: 5.0.0
platform: ruby
authors:
- Rainbows! hackers
autorequire: 
bindir: bin
cert_chain: []
date: 2015-11-25 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: rack
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.1'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.1'
- !ruby/object:Gem::Dependency
  name: kgio
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '2.5'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '2.5'
- !ruby/object:Gem::Dependency
  name: unicorn
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '5.0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '5.0'
- !ruby/object:Gem::Dependency
  name: isolate
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '3.1'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '3.1'
- !ruby/object:Gem::Dependency
  name: olddoc
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
description: |-
  \Rainbows! is an HTTP server for sleepy Rack applications.  It is based on
  unicorn, but designed to handle applications that expect long
  request/response times and/or slow clients.
email: rainbows-public@bogomips.org
executables:
- rainbows
extensions: []
extra_rdoc_files:
- DEPLOY
- FAQ
- lib/rainbows.rb
- lib/rainbows/actor_spawn.rb
- lib/rainbows/app_pool.rb
- lib/rainbows/base.rb
- lib/rainbows/configurator.rb
- lib/rainbows/coolio.rb
- lib/rainbows/coolio_thread_pool.rb
- lib/rainbows/coolio_thread_spawn.rb
- lib/rainbows/dev_fd_response.rb
- lib/rainbows/epoll.rb
- lib/rainbows/event_machine.rb
- lib/rainbows/fiber_pool.rb
- lib/rainbows/fiber_spawn.rb
- lib/rainbows/max_body.rb
- lib/rainbows/never_block.rb
- lib/rainbows/rev.rb
- lib/rainbows/rev_fiber_spawn.rb
- lib/rainbows/rev_thread_pool.rb
- lib/rainbows/rev_thread_spawn.rb
- lib/rainbows/revactor.rb
- lib/rainbows/sendfile.rb
- lib/rainbows/server_token.rb
- lib/rainbows/stream_response_epoll.rb
- lib/rainbows/thread_pool.rb
- lib/rainbows/thread_spawn.rb
- lib/rainbows/thread_timeout.rb
- lib/rainbows/worker_yield.rb
- lib/rainbows/writer_thread_pool.rb
- lib/rainbows/writer_thread_spawn.rb
- lib/rainbows/xepoll.rb
- lib/rainbows/xepoll_thread_pool.rb
- lib/rainbows/xepoll_thread_spawn.rb
- LATEST
- LICENSE
- NEWS
- rainbows_1
- README
- SIGNALS
- TODO
- TUNING
- vs_Unicorn
- Summary
- Test_Suite
- Static_Files
- Sandbox
- HACKING
files:
- ".document"
- ".gitattributes"
- ".gitignore"
- ".manifest"
- ".olddoc.yml"
- COPYING
- DEPLOY
- Documentation/.gitignore
- Documentation/GNUmakefile
- Documentation/comparison.haml
- Documentation/rainbows.1.txt
- FAQ
- GIT-VERSION-FILE
- GIT-VERSION-GEN
- GNUmakefile
- HACKING
- LATEST
- LICENSE
- NEWS
- README
- SIGNALS
- Sandbox
- Static_Files
- Summary
- TODO
- TUNING
- Test_Suite
- archive/.gitignore
- archive/rfmig.rb
- archive/slrnpull.conf
- bin/rainbows
- examples/reverse_proxy.ru
- lib/rainbows.rb
- lib/rainbows/actor_spawn.rb
- lib/rainbows/app_pool.rb
- lib/rainbows/base.rb
- lib/rainbows/client.rb
- lib/rainbows/configurator.rb
- lib/rainbows/const.rb
- lib/rainbows/coolio.rb
- lib/rainbows/coolio/client.rb
- lib/rainbows/coolio/core.rb
- lib/rainbows/coolio/heartbeat.rb
- lib/rainbows/coolio/master.rb
- lib/rainbows/coolio/response_chunk_pipe.rb
- lib/rainbows/coolio/response_pipe.rb
- lib/rainbows/coolio/server.rb
- lib/rainbows/coolio/thread_client.rb
- lib/rainbows/coolio_fiber_spawn.rb
- lib/rainbows/coolio_support.rb
- lib/rainbows/coolio_thread_pool.rb
- lib/rainbows/coolio_thread_pool/client.rb
- lib/rainbows/coolio_thread_pool/watcher.rb
- lib/rainbows/coolio_thread_spawn.rb
- lib/rainbows/coolio_thread_spawn/client.rb
- lib/rainbows/dev_fd_response.rb
- lib/rainbows/epoll.rb
- lib/rainbows/epoll/client.rb
- lib/rainbows/epoll/response_chunk_pipe.rb
- lib/rainbows/epoll/response_pipe.rb
- lib/rainbows/epoll/server.rb
- lib/rainbows/error.rb
- lib/rainbows/ev_core.rb
- lib/rainbows/ev_core/cap_input.rb
- lib/rainbows/event_machine.rb
- lib/rainbows/event_machine/client.rb
- lib/rainbows/event_machine/response_chunk_pipe.rb
- lib/rainbows/event_machine/response_pipe.rb
- lib/rainbows/event_machine/server.rb
- lib/rainbows/event_machine/try_defer.rb
- lib/rainbows/fiber.rb
- lib/rainbows/fiber/base.rb
- lib/rainbows/fiber/body.rb
- lib/rainbows/fiber/coolio.rb
- lib/rainbows/fiber/coolio/heartbeat.rb
- lib/rainbows/fiber/coolio/methods.rb
- lib/rainbows/fiber/coolio/server.rb
- lib/rainbows/fiber/coolio/sleeper.rb
- lib/rainbows/fiber/io.rb
- lib/rainbows/fiber/io/compat.rb
- lib/rainbows/fiber/io/methods.rb
- lib/rainbows/fiber/io/pipe.rb
- lib/rainbows/fiber/io/socket.rb
- lib/rainbows/fiber/queue.rb
- lib/rainbows/fiber_pool.rb
- lib/rainbows/fiber_spawn.rb
- lib/rainbows/http_parser.rb
- lib/rainbows/http_server.rb
- lib/rainbows/join_threads.rb
- lib/rainbows/max_body.rb
- lib/rainbows/max_body/rewindable_wrapper.rb
- lib/rainbows/max_body/wrapper.rb
- lib/rainbows/never_block.rb
- lib/rainbows/never_block/core.rb
- lib/rainbows/never_block/event_machine.rb
- lib/rainbows/pool_size.rb
- lib/rainbows/process_client.rb
- lib/rainbows/queue_pool.rb
- lib/rainbows/response.rb
- lib/rainbows/rev.rb
- lib/rainbows/rev_fiber_spawn.rb
- lib/rainbows/rev_thread_pool.rb
- lib/rainbows/rev_thread_spawn.rb
- lib/rainbows/revactor.rb
- lib/rainbows/revactor/client.rb
- lib/rainbows/revactor/client/methods.rb
- lib/rainbows/revactor/client/tee_socket.rb
- lib/rainbows/revactor/proxy.rb
- lib/rainbows/reverse_proxy.rb
- lib/rainbows/reverse_proxy/coolio.rb
- lib/rainbows/reverse_proxy/ev_client.rb
- lib/rainbows/reverse_proxy/event_machine.rb
- lib/rainbows/reverse_proxy/multi_thread.rb
- lib/rainbows/reverse_proxy/synchronous.rb
- lib/rainbows/sendfile.rb
- lib/rainbows/server_token.rb
- lib/rainbows/socket_proxy.rb
- lib/rainbows/stream_file.rb
- lib/rainbows/stream_response_epoll.rb
- lib/rainbows/stream_response_epoll/client.rb
- lib/rainbows/sync_close.rb
- lib/rainbows/thread_pool.rb
- lib/rainbows/thread_spawn.rb
- lib/rainbows/thread_timeout.rb
- lib/rainbows/version.rb
- lib/rainbows/worker_yield.rb
- lib/rainbows/writer_thread_pool.rb
- lib/rainbows/writer_thread_pool/client.rb
- lib/rainbows/writer_thread_spawn.rb
- lib/rainbows/writer_thread_spawn/client.rb
- lib/rainbows/xepoll.rb
- lib/rainbows/xepoll/client.rb
- lib/rainbows/xepoll_thread_pool.rb
- lib/rainbows/xepoll_thread_pool/client.rb
- lib/rainbows/xepoll_thread_spawn.rb
- lib/rainbows/xepoll_thread_spawn/client.rb
- local.mk.sample
- man/man1/rainbows.1
- pkg.mk
- rainbows.gemspec
- rainbows_1
- setup.rb
- t/.gitignore
- t/GNUmakefile
- t/README
- t/app_deferred.ru
- t/async-response-no-autochunk.ru
- t/async-response.ru
- t/async_chunk_app.ru
- t/async_examples/README
- t/async_examples/async_app.ru
- t/async_examples/async_tailer.ru
- t/async_sinatra.ru
- t/bin/content-md5-put
- t/bin/sha1sum.rb
- t/bin/unused_listen
- t/byte-range-common.sh
- t/client_header_buffer_size.ru
- t/close-has-env.ru
- t/close-pipe-response.ru
- t/close-pipe-to_path-response.ru
- t/content-md5.ru
- t/cramp/README
- t/cramp/rainsocket.ru
- t/cramp/streaming.ru
- t/env.ru
- t/env_rack_env.ru
- t/fast-pipe-response.ru
- t/file-wrap-to_path.ru
- t/fork-sleep.ru
- t/heartbeat-timeout.ru
- t/hijack.ru
- t/kgio-pipe-response.ru
- t/large-file-response.ru
- t/my-tap-lib.sh
- t/rack-fiber_pool/app.ru
- t/sha1-random-size.ru
- t/sha1.ru
- t/simple-http_ActorSpawn.ru
- t/simple-http_Base.ru
- t/simple-http_Coolio.ru
- t/simple-http_CoolioFiberSpawn.ru
- t/simple-http_CoolioThreadPool.ru
- t/simple-http_CoolioThreadSpawn.ru
- t/simple-http_Epoll.ru
- t/simple-http_EventMachine.ru
- t/simple-http_FiberPool.ru
- t/simple-http_FiberSpawn.ru
- t/simple-http_NeverBlock.ru
- t/simple-http_Rev.ru
- t/simple-http_RevFiberSpawn.ru
- t/simple-http_RevThreadPool.ru
- t/simple-http_RevThreadSpawn.ru
- t/simple-http_Revactor.ru
- t/simple-http_ThreadPool.ru
- t/simple-http_ThreadSpawn.ru
- t/simple-http_WriterThreadPool.ru
- t/simple-http_WriterThreadSpawn.ru
- t/simple-http_XEpoll.ru
- t/simple-http_XEpollThreadPool.ru
- t/simple-http_XEpollThreadSpawn.ru
- t/sleep.ru
- t/t0000-simple-http.sh
- t/t0000.ru
- t/t0001-unix-http.sh
- t/t0002-graceful.sh
- t/t0002-parser-error.sh
- t/t0003-reopen-logs.sh
- t/t0004-heartbeat-timeout.sh
- t/t0005-large-file-response.sh
- t/t0006-process-rack-env.sh
- t/t0007-worker-follows-master-to-death.sh
- t/t0008-ensure-usable-after-limit.sh
- t/t0009-broken-app.sh
- t/t0009.ru
- t/t0010-keepalive-timeout-effective.sh
- t/t0011-close-on-exec-set.sh
- t/t0012-spurious-wakeups-quiet.sh
- t/t0013-reload-bad-config.sh
- t/t0014-config-conflict.sh
- t/t0015-working_directory.sh
- t/t0016-onenine-encoding-is-tricky.sh
- t/t0016.rb
- t/t0017-keepalive-timeout-zero.sh
- t/t0018-reload-restore-settings.sh
- t/t0019-keepalive-cpu-usage.sh
- t/t0020-large-sendfile-response.sh
- t/t0021-sendfile-wrap-to_path.sh
- t/t0022-copy_stream-byte-range.sh
- t/t0023-sendfile-byte-range.sh
- t/t0024-pipelined-sendfile-response.sh
- t/t0025-write-on-close.sh
- t/t0026-splice-copy_stream-byte-range.sh
- t/t0027-nil-copy_stream.sh
- t/t0030-fast-pipe-response.sh
- t/t0031-close-pipe-response.sh
- t/t0032-close-pipe-to_path-response.sh
- t/t0034-pipelined-pipe-response.sh
- t/t0035-kgio-pipe-response.sh
- t/t0040-keepalive_requests-setting.sh
- t/t0041-optional-pool-size.sh
- t/t0042-client_header_buffer_size.sh
- t/t0043-quit-keepalive-disconnect.sh
- t/t0045-client_max_header_size.sh
- t/t0050-response-body-close-has-env.sh
- t/t0100-rack-input-hammer-chunked.sh
- t/t0100-rack-input-hammer-content-length.sh
- t/t0101-rack-input-trailer.sh
- t/t0102-rack-input-short.sh
- t/t0103-rack-input-limit.sh
- t/t0104-rack-input-limit-tiny.sh
- t/t0105-rack-input-limit-bigger.sh
- t/t0106-rack-input-keepalive.sh
- t/t0107-rack-input-limit-zero.sh
- t/t0113-rewindable-input-false.sh
- t/t0113.ru
- t/t0114-rewindable-input-true.sh
- t/t0114.ru
- t/t0200-async-response.sh
- t/t0201-async-response-no-autochunk.sh
- t/t0202-async-response-one-oh.sh
- t/t0300-async_sinatra.sh
- t/t0400-em-async-app.sh
- t/t0401-em-async-tailer.sh
- t/t0402-async-keepalive.sh
- t/t0500-cramp-streaming.sh
- t/t0501-cramp-rainsocket.sh
- t/t0600-rack-fiber_pool.sh
- t/t0700-app-deferred.sh
- t/t0800-rack-hijack.sh
- t/t9000-rack-app-pool.sh
- t/t9000.ru
- t/t9001-sendfile-to-path.sh
- t/t9001.ru
- t/t9002-server-token.sh
- t/t9002.ru
- t/t9100-thread-timeout.sh
- t/t9100.ru
- t/t9101-thread-timeout-threshold.sh
- t/t9101.ru
- t/test-lib.sh
- t/test_isolate.rb
- t/test_isolate_cramp.rb
- t/times.ru
- t/worker-follows-master-to-death.ru
- t/write-on-close.ru
- vs_Unicorn
homepage: http://rainbows.bogomips.org/
licenses:
- GPL-2.0+
- Nonstandard
metadata: {}
post_install_message: 
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubyforge_project: 
rubygems_version: 2.5.0
signing_key: 
specification_version: 4
summary: "- unicorn for sleepy apps and slow clients"
test_files: []
rainbows-5.0.0/rainbows.gemspec0000644000004100000410000000460212641135250016562 0ustar  www-datawww-data# -*- encoding: binary -*-
ENV["VERSION"] or abort "VERSION= must be specified"
manifest = File.readlines('.manifest').map! { |x| x.chomp! }
require 'olddoc'
extend Olddoc::Gemspec
name, summary, title = readme_metadata

Gem::Specification.new do |s|
  s.name = %q{rainbows}
  s.version = ENV["VERSION"].dup

  s.authors = ["#{name} hackers"]
  s.description = readme_description
  s.email = %q{rainbows-public@bogomips.org}
  s.executables = %w(rainbows)
  s.extra_rdoc_files = extra_rdoc_files(manifest)
  s.files = manifest
  s.homepage = Olddoc.config['rdoc_url']
  s.summary = summary

  # we want a newer Rack for a valid HeaderHash#each
  s.add_dependency(%q<rack>, ['~> 1.1'])

  # kgio 2.5 has kgio_wait_* methods that take optional timeout args
  s.add_dependency(%q<kgio>, ['~> 2.5'])

  # we need unicorn for the HTTP parser and process management
  # we need unicorn 4.8.0+ since we depend on undocumented/unsupported
  # unicorn internals.
  s.add_dependency(%q, ["~> 5.0"])

  s.add_development_dependency(%q, "~> 3.1")
  s.add_development_dependency(%q, "~> 1.0")

  # optional runtime dependencies depending on configuration
  # see t/test_isolate.rb for the exact versions we've tested with
  #
  # Revactor >= 0.1.5 includes UNIX domain socket support
  # s.add_dependency(%q, [">= 0.1.5"])
  #
  # Revactor depends on Rev, too, 0.3.0 got the ability to attach IOs
  # s.add_dependency(%q, [">= 0.3.2"])
  #
  # Cool.io is the new Rev, but it doesn't work with Revactor
  # s.add_dependency(%q, [">= 1.0"])
  #
  # Rev depends on IOBuffer, which got faster in 0.1.3
  # s.add_dependency(%q, [">= 0.1.3"])
  #
  # We use the new EM::attach/watch API in 0.12.10
  # s.add_dependency(%q, ["~> 0.12.10"])
  #
  # NeverBlock, currently only available on http://gems.github.com/
  # s.add_dependency(%q, ["~> 0.1.6.1"])

  # Note: To avoid ambiguity, we intentionally avoid the SPDX-compatible
  # 'Ruby' here since Ruby 1.9.3 switched to BSD-2-Clause license while
  # we already inherited our license from Mongrel during Ruby 1.8.
  # We cannot automatically switch licenses when Ruby changes their license,
  # so we remain optionally-licensed under the terms of Ruby 1.8 despite
  # not having a good way to specify this in an SPDX-compatible way...
  s.licenses = ['GPL-2.0+', 'Nonstandard'] # Nonstandard = 'Ruby 1.8'
end
rainbows-5.0.0/man/0000755000004100000410000000000012641135250014142 5ustar  www-datawww-datarainbows-5.0.0/man/man1/0000755000004100000410000000000012641135250014776 5ustar  www-datawww-datarainbows-5.0.0/man/man1/rainbows.10000644000004100000410000001410112641135250016701 0ustar  www-datawww-data.TH rainbows 1 "December 3, 2009" "Rainbows! User Manual"
.SH NAME
.PP
rainbows - rackup-like command to launch Rainbows!
.SH SYNOPSIS
.PP
rainbows [-c CONFIG_FILE] [-E RACK_ENV] [-D] [RACKUP_FILE]
.SH DESCRIPTION
.PP
A rackup(1)-like command to launch Rack applications using Rainbows!.
It is expected to be started in your application root (APP_ROOT), but
the "working_directory" directive may be used in the CONFIG_FILE.
.PP
While Rainbows! takes a myriad of command-line options for compatibility
with ruby(1) and rackup(1), it is recommended to stick to the few
command-line options specified in the SYNOPSIS and use the CONFIG_FILE
as much as possible.
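.PP
For example, the following command (the CONFIG_FILE path shown here is
only an illustration) starts a daemonized instance under the
"deployment" environment:
.PP
.B rainbows \-c /etc/rainbows/my_app.conf.rb \-E deployment \-D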
.SH RACKUP FILE
.PP
This defaults to "config.ru" in APP_ROOT.
It should be the same file used by rackup(1) and other Rack launchers,
it uses the \f[I]Rack::Builder\f[] DSL.
.PP
Embedded command-line options are mostly parsed for compatibility with
rackup(1) but strongly discouraged.
.SH UNICORN OPTIONS
.TP
.B -c, --config-file CONFIG_FILE
Path to the Unicorn-specific config file.
The config file is implemented as a Ruby DSL, so Ruby code may be executed.
See the RDoc/ri for the \f[I]Unicorn::Configurator\f[] class for the
full list of directives available from the DSL.
.RS
.RE
.TP
.B -D, --daemonize
Run daemonized in the background.
The process is detached from the controlling terminal and stdin is
redirected to "/dev/null".
Unlike many common UNIX daemons, we do not chdir to "/" upon
daemonization to allow more control over the startup/upgrade process.
Unless specified in the CONFIG_FILE, stderr and stdout will also be
redirected to "/dev/null".
.RS
.RE
.TP
.B -E, --env RACK_ENV
Run under the given RACK_ENV.
See the RACK ENVIRONMENT section for more details.
.RS
.RE
.TP
.B -l, --listen ADDRESS
Listens on a given ADDRESS.
ADDRESS may be in the form of HOST:PORT or PATH; HOST:PORT is taken to
mean a TCP socket and PATH is meant to be a path to a UNIX domain
socket.
Defaults to "0.0.0.0:8080" (all addresses on TCP port 8080) For
production deployments, specifying the "listen" directive in CONFIG_FILE
is recommended as it allows fine-tuning of socket options.
.RS
.RE
.TP
.B -N, --no-default-middleware
Disables loading middleware implied by RACK_ENV.
This bypasses the configuration documented in the RACK ENVIRONMENT
section, but still allows RACK_ENV to be used for
application/framework-specific purposes.
.RS
.RE
.SH RACKUP COMPATIBILITY OPTIONS
.TP
.B -o, --host HOST
Listen on a TCP socket belonging to HOST, default is "0.0.0.0" (all
addresses).
If specified multiple times on the command-line, only the last-specified
value takes effect.
This option only exists for compatibility with the rackup(1) command,
use of "-l"/"--listen" switch is recommended instead.
.RS
.RE
.TP
.B -p, --port PORT
Listen on the specified TCP PORT, default is 8080.
If specified multiple times on the command-line, only the last-specified
value takes effect.
This option only exists for compatibility with the rackup(1) command,
use of "-l"/"--listen" switch is recommended instead.
.RS
.RE
.TP
.B -s, --server SERVER
No-op, this exists only for compatibility with rackup(1).
.RS
.RE
.SH RUBY OPTIONS
.TP
.B -e, --eval LINE
Evaluate a LINE of Ruby code.
This evaluation happens immediately as the command-line is being parsed.
.RS
.RE
.TP
.B -d, --debug
Turn on debug mode, the $DEBUG variable is set to true.
.RS
.RE
.TP
.B -w, --warn
Turn on verbose warnings, the $VERBOSE variable is set to true.
.RS
.RE
.TP
.B -I, --include PATH
Specify $LOAD_PATH.
PATH will be prepended to $LOAD_PATH.
The \[aq]:\[aq] character may be used to delimit multiple directories.
This directive may be used more than once.
Modifications to $LOAD_PATH take place immediately and in the order they
were specified on the command-line.
.RS
.RE
.TP
.B -r, --require LIBRARY
Require a specified LIBRARY before executing the application.
The "require" statements will be executed immediately and in the order
they were specified on the command-line.
.RS
.RE
.SH SIGNALS
.PP
The following UNIX signals may be sent to the master process:
.IP \[bu] 2
HUP - reload config file, app, and gracefully restart all workers
.IP \[bu] 2
INT/TERM - quick shutdown, kills all workers immediately
.IP \[bu] 2
QUIT - graceful shutdown, waits for workers to finish their current
request before finishing.
.IP \[bu] 2
USR1 - reopen all logs owned by the master and all workers.
See Unicorn::Util.reopen_logs for what is considered a log.
.IP \[bu] 2
USR2 - reexecute the running binary.
A separate QUIT should be sent to the original process once the child is
verified to be up and running.
.IP \[bu] 2
WINCH - gracefully stops workers but keeps the master running.
This will only work for daemonized processes.
.IP \[bu] 2
TTIN - increment the number of worker processes by one
.IP \[bu] 2
TTOU - decrement the number of worker processes by one
.PP
See the SIGNALS (http://rainbows.bogomips.org/SIGNALS.html) document for
full description of all signals used by Rainbows!.
.SH RACK ENVIRONMENT
.PP
Accepted values of RACK_ENV and the middleware they automatically load
(outside of RACKUP_FILE) are exactly as those in rackup(1):
.IP \[bu] 2
development - loads Rack::CommonLogger, Rack::ShowExceptions, and
Rack::Lint middleware
.IP \[bu] 2
deployment - loads Rack::CommonLogger middleware
.IP \[bu] 2
none - loads no middleware at all, relying entirely on RACKUP_FILE
.PP
All unrecognized values for RACK_ENV are assumed to be "none".
Production deployments are strongly encouraged to use "deployment" or
"none" for maximum performance.
.PP
Note the Rack::ContentLength and Rack::Chunked middlewares are also
loaded by "deployment" and "development", but no other values of
RACK_ENV.
If needed, they must be individually specified in the RACKUP_FILE; some
frameworks do not require them.
.SH SEE ALSO
.IP \[bu] 2
unicorn(1)
.IP \[bu] 2
\f[I]Rack::Builder\f[] ri/RDoc
.IP \[bu] 2
\f[I]Unicorn::Configurator\f[] ri/RDoc
.IP \[bu] 2
Rainbows! RDoc (http://rainbows.bogomips.org/)
.IP \[bu] 2
Rack RDoc (http://rdoc.info/gems/r#/gems/rack/frames)
.IP \[bu] 2
Rackup HowTo (http://wiki.github.com/rack/rack/tutorial-rackup-howto)
.SH AUTHORS
Rainbows! Hackers <rainbows-public@bogomips.org>.
rainbows-5.0.0/HACKING0000644000004100000410000000402612641135250014360 0ustar  www-datawww-data= Rainbows! Hacker's Guide

=== Tests

All tests are written in POSIX shell.  See README file in the t/ directory.

=== Documentation

Due to the lack of RDoc-to-manpage converters we know about, we're
writing manpages in Markdown and converting to troff/HTML with Pandoc.

Please wrap documentation at 72 characters-per-line or less (long URLs
are exempt) so it is comfortably readable from terminals.

When referencing mailing list posts, use
"http://bogomips.org/rainbows-public/m/$MESSAGE_ID" if possible since
the Message-ID remains searchable even if the website becomes unavailable.

== Contributing

Contributions are welcome in the form of patches, pull requests, code
review, testing, documentation, user support or any other feedback is
welcome.  The mailing list is the central coordination point for all
user and developer feedback and bug reports.

=== Submitting Patches

Follow conventions already established in the code and do not exceed 80
characters per line.

Inline patches (from "git format-patch -M") to the mailing list are
preferred because they allow code review and comments in the reply to
the patch.

We will adhere to mostly the same conventions for patch submissions as
git itself.  See the Documentation/SubmittingPatches document
distributed with git for patch submission guidelines to follow.  Just
don't email the git mailing list or maintainer with Rainbows! patches :)

No subscription is required to post to the mailing list at
rainbows-public@bogomips.org

Please Cc: everyone on replies, as not everyone is subscribed.

== Building a Gem

In order to build the gem, you must install the following components:

 * olddoc (RubyGem)
 * pandoc

You can build the Rainbows! gem with the following command:

  gmake gem

== Running Development Versions

It is easy to install the contents of your git working directory:

Via RubyGems (recommended):

  gmake install-gem

Without RubyGems (via setup.rb):

  ruby setup.rb

It is not at all recommended to mix a RubyGems installation with an
installation done without RubyGems, however.
rainbows-5.0.0/archive/0000755000004100000410000000000012641135250015010 5ustar  www-datawww-datarainbows-5.0.0/archive/slrnpull.conf0000644000004100000410000000027512641135250017536 0ustar  www-datawww-data# group_name                         max        expire     headers_only
gmane.comp.lang.ruby.rainbows.general 1000000000 1000000000 0

# usage: slrnpull -d $PWD -h news.gmane.org --no-post
rainbows-5.0.0/archive/.gitignore0000644000004100000410000000002612641135250016776 0ustar  www-datawww-data/data
/news
/requests
rainbows-5.0.0/archive/rfmig.rb0000644000004100000410000000173412641135250016446 0ustar  www-datawww-data#!/usr/bin/env ruby
require 'find'
require 'fileutils'
rfdir = 'rubyforge.org:/var/www/gforge-projects/rainbows/'
newbase = 'http://rainbows.bogomips.org/'
refresh = '<meta http-equiv="refresh" content="0; url=%s" />'
old = 'rf.old'
new = 'rf.new'
cmd = %W(rsync -av #{rfdir} #{old}/)
unless File.directory?(old)
  system(*cmd) or abort "#{cmd.inspect} failed: #$?"
end

Find.find(old) do |path|
  path =~ /\.html\z/ or next
  data = File.read(path)
  tmp = path.split(%r{/})
  tmp.shift == old or abort "BUG"
  dst = "#{new}/#{tmp.join('/')}"

  tmp[-1] = '' if tmp[-1] == "index.html"
  url = "#{newbase}#{tmp.join('/')}"
  meta = sprintf(refresh, url)
  data.sub!(/(]*>)/i, "#$1#{meta}")
  data.sub!(/(<body[^>]*>)/i,
            "#{$1}Redirecting to #{url} ...
") FileUtils.mkdir_p(File.dirname(dst)) File.open(dst, "w") { |fp| fp.write(data) } end print "Verify results in #{new}/, then run:\n " puts %W(rsync -av #{new}/ #{rfdir}).join(' ') rainbows-5.0.0/TUNING0000644000004100000410000000375212641135250014325 0ustar www-datawww-data= Tuning \Rainbows! Most of the {tuning notes}[http://unicorn.bogomips.org/TUNING.html] apply to \Rainbows! as well. \Rainbows! is not particularly optimized at the moment and is designed for applications that spend large amounts of the time waiting on network activity. Thus memory usage and memory bandwidth for keeping connections open are often limiting factors as well. As of October 2009, absolutely ZERO work has been done for performance validation and tuning. Furthermore, \Rainbows! is NOT expected to do well on traditional benchmarks. Remember that \Rainbows! is only designed for applications that sleep and/or trickle network traffic. In the future, *may* do well in traditional benchmarks as a side effect, but that will never be the primary goal of the project. == \Rainbows! configuration * Don't set +worker_connections+ too high. It is often better to start denying requests and only serve the clients you can than to be completely bogged down and be unusable for everybody. * Increase +worker_processes+ if you have resources (RAM/DB connections) available. Additional worker processes can better utilize SMP, are more robust against crashes and are more likely to be fairly scheduled by the kernel. * If your workers do not seem to be releasing memory to the OS after traffic spikes, consider the {mall}[http://bogomips.org/mall/] library which allows access to the mallopt(3) function from Ruby. As of October 2009 tcmalloc (the default allocator for Ruby Enterprise Edition) does not release memory back to the kernel, the best it can do is use madvise(2) in an effort to swap out unused pages. == nginx configuration If you intend to use nginx as a reverse-proxy in front of \Rainbows! to handle Comet applications, make sure you disable proxy response buffering in nginx: proxy_buffering off; This can be disabled on a per-backend basis in nginx, so under no circumstances should you disable response buffering to Unicorn backends, only to \Rainbows! backends. 
rainbows-5.0.0/.gitignore0000644000004100000410000000034212641135250015356 0ustar www-datawww-data*.bundle *.log *.so *.rbc .DS_Store /.config /InstalledFiles /doc /local.mk /test/install-* log/ pkg/ /vendor /NEWS* /ChangeLog /.manifest /GIT-VERSION-FILE /man /LATEST tags TAGS /Summary /rainbows_1 /lib/rainbows/version.rb rainbows-5.0.0/pkg.mk0000644000004100000410000001030112641135250014474 0ustar www-datawww-dataRUBY = ruby RAKE = rake RSYNC = rsync OLDDOC = olddoc RDOC = rdoc GIT-VERSION-FILE: .FORCE-GIT-VERSION-FILE @./GIT-VERSION-GEN -include GIT-VERSION-FILE -include local.mk DLEXT := $(shell $(RUBY) -rrbconfig -e 'puts RbConfig::CONFIG["DLEXT"]') RUBY_VERSION := $(shell $(RUBY) -e 'puts RUBY_VERSION') RUBY_ENGINE := $(shell $(RUBY) -e 'puts((RUBY_ENGINE rescue "ruby"))') lib := lib ifeq ($(shell test -f script/isolate_for_tests && echo t),t) isolate_libs := tmp/isolate/$(RUBY_ENGINE)-$(RUBY_VERSION)/isolate.mk $(isolate_libs): script/isolate_for_tests @$(RUBY) script/isolate_for_tests -include $(isolate_libs) lib := $(lib):$(ISOLATE_LIBS) endif ext := $(firstword $(wildcard ext/*)) ifneq ($(ext),) ext_pfx := tmp/ext/$(RUBY_ENGINE)-$(RUBY_VERSION) ext_h := $(wildcard $(ext)/*/*.h $(ext)/*.h) ext_src := $(wildcard $(ext)/*.c $(ext_h)) ext_pfx_src := $(addprefix $(ext_pfx)/,$(ext_src)) ext_d := $(ext_pfx)/$(ext)/.d $(ext)/extconf.rb: $(wildcard $(ext)/*.h) @>> $@ $(ext_d): @mkdir -p $(@D) @> $@ $(ext_pfx)/$(ext)/%: $(ext)/% $(ext_d) install -m 644 $< $@ $(ext_pfx)/$(ext)/Makefile: $(ext)/extconf.rb $(ext_d) $(ext_h) $(RM) -f $(@D)/*.o cd $(@D) && $(RUBY) $(CURDIR)/$(ext)/extconf.rb ext_sfx := _ext.$(DLEXT) ext_dl := $(ext_pfx)/$(ext)/$(notdir $(ext)_ext.$(DLEXT)) $(ext_dl): $(ext_src) $(ext_pfx_src) $(ext_pfx)/$(ext)/Makefile @echo $^ == $@ $(MAKE) -C $(@D) lib := $(lib):$(ext_pfx)/$(ext) build: $(ext_dl) else build: endif pkg_extra += GIT-VERSION-FILE NEWS LATEST NEWS: GIT-VERSION-FILE .olddoc.yml $(OLDDOC) prepare LATEST: NEWS manifest: $(RM) .manifest $(MAKE) .manifest .manifest: $(pkg_extra) (git ls-files && for i in $@ $(pkg_extra); do echo $$i; done) | \ LC_ALL=C sort > $@+ cmp $@+ $@ || mv $@+ $@ $(RM) $@+ doc:: .document .olddoc.yml $(pkg_extra) $(PLACEHOLDERS) -find lib -type f -name '*.rbc' -exec rm -f '{}' ';' -find ext -type f -name '*.rbc' -exec rm -f '{}' ';' $(RM) -r doc $(RDOC) -f oldweb $(OLDDOC) merge install -m644 COPYING doc/COPYING install -m644 NEWS doc/NEWS install -m644 NEWS.atom.xml doc/NEWS.atom.xml install -m644 $(shell LC_ALL=C grep '^[A-Z]' .document) doc/ ifneq ($(VERSION),) pkggem := pkg/$(rfpackage)-$(VERSION).gem pkgtgz := pkg/$(rfpackage)-$(VERSION).tgz # ensures we're actually on the tagged $(VERSION), only used for release verify: test x"$(shell umask)" = x0022 git rev-parse --verify refs/tags/v$(VERSION)^{} git diff-index --quiet HEAD^0 test $$(git rev-parse --verify HEAD^0) = \ $$(git rev-parse --verify refs/tags/v$(VERSION)^{}) fix-perms: -git ls-tree -r HEAD | awk '/^100644 / {print $$NF}' | xargs chmod 644 -git ls-tree -r HEAD | awk '/^100755 / {print $$NF}' | xargs chmod 755 gem: $(pkggem) install-gem: $(pkggem) gem install $(CURDIR)/$< $(pkggem): manifest fix-perms gem build $(rfpackage).gemspec mkdir -p pkg mv $(@F) $@ $(pkgtgz): distdir = $(basename $@) $(pkgtgz): HEAD = v$(VERSION) $(pkgtgz): manifest fix-perms @test -n "$(distdir)" $(RM) -r $(distdir) mkdir -p $(distdir) tar cf - $$(cat .manifest) | (cd $(distdir) && tar xf -) cd pkg && tar cf - $(basename $(@F)) | gzip -9 > $(@F)+ mv $@+ $@ package: $(pkgtgz) $(pkggem) release:: verify 
package # push gem to RubyGems.org gem push $(pkggem) else gem install-gem: GIT-VERSION-FILE $(MAKE) $@ VERSION=$(GIT_VERSION) endif all:: test test_units := $(wildcard test/test_*.rb) test: test-unit test-unit: $(test_units) $(test_units): build $(RUBY) -I $(lib) $@ $(RUBY_TEST_OPTS) # this requires GNU coreutils variants ifneq ($(RSYNC_DEST),) publish_doc: -git set-file-times $(MAKE) doc $(MAKE) doc_gz $(RSYNC) -av doc/ $(RSYNC_DEST)/ git ls-files | xargs touch endif # Create gzip variants of the same timestamp as the original so nginx # "gzip_static on" can serve the gzipped versions directly. doc_gz: docs = $(shell find doc -type f ! -regex '^.*\.gz$$') doc_gz: for i in $(docs); do \ gzip --rsyncable -9 < $$i > $$i.gz; touch -r $$i $$i.gz; done check-warnings: @(for i in $$(git ls-files '*.rb'| grep -v '^setup\.rb$$'); \ do $(RUBY) -d -W2 -c $$i; done) | grep -v '^Syntax OK$$' || : ifneq ($(PLACEHOLDERS),) $(PLACEHOLDERS): echo olddoc_placeholder > $@ endif .PHONY: all .FORCE-GIT-VERSION-FILE doc test $(test_units) manifest .PHONY: check-warnings rainbows-5.0.0/GIT-VERSION-FILE0000644000004100000410000000002412641135250015571 0ustar www-datawww-dataGIT_VERSION = 5.0.0 rainbows-5.0.0/LICENSE0000644000004100000410000000564312641135250014404 0ustar www-datawww-data\Rainbows! is copyrighted Free Software by all contributors, see the logs in revision control for names and email addresses of all of them. You can redistribute it and/or modify it under either the terms of the GNU General Public License (GPL) as published by the Free Software Foundation (FSF), either version 2 of the License, or (at your option) any later version. We currently prefer the GPLv3 or later for derivative works, but the GPLv2 is fine. The complete texts of the GPLv2 and GPLv3 are below: GPLv2 - http://www.gnu.org/licenses/gpl-2.0.txt GPLv3 - http://www.gnu.org/licenses/gpl-3.0.txt You may (against our _preference_) also use the Ruby 1.8 license terms. === Ruby 1.8-specific terms (if you're not using the GPLv2/GPLv3) 1. You may make and give away verbatim copies of the source form of the software without restriction, provided that you duplicate all of the original copyright notices and associated disclaimers. 2. You may modify your copy of the software in any way, provided that you do at least ONE of the following: a) place your modifications in the Public Domain or otherwise make them Freely Available, such as by posting said modifications to Usenet or an equivalent medium, or by allowing the author to include your modifications in the software. b) use the modified software only within your corporation or organization. c) rename any non-standard executables so the names do not conflict with standard executables, which must also be provided. d) make other distribution arrangements with the author. 3. You may distribute the software in object code or executable form, provided that you do at least ONE of the following: a) distribute the executables and library files of the software, together with instructions (in the manual page or equivalent) on where to get the original distribution. b) accompany the distribution with the machine-readable source of the software. c) give non-standard executables non-standard names, with instructions on where to get the original software distribution. d) make other distribution arrangements with the author. 4. You may modify and include the part of the software into any other software (possibly commercial). 
But some files in the distribution are not written by the author, so that they are not under this terms. 5. The scripts and library files supplied as input to or produced as output from the software do not automatically fall under the copyright of the software, but belong to whomever generated them, and may be sold commercially, and may be aggregated with this software. 6. THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. rainbows-5.0.0/setup.rb0000644000004100000410000010652612641135250015066 0ustar www-datawww-data# -*- encoding: binary -*- # # setup.rb # # Copyright (c) 2000-2005 Minero Aoki # # This program is free software. # You can distribute/modify this program under the terms of # the GNU LGPL, Lesser General Public License version 2.1. # unless Enumerable.method_defined?(:map) # Ruby 1.4.6 module Enumerable alias map collect end end unless File.respond_to?(:read) # Ruby 1.6 def File.read(fname) open(fname) {|f| return f.read } end end unless Errno.const_defined?(:ENOTEMPTY) # Windows? module Errno class ENOTEMPTY # We do not raise this exception, implementation is not needed. end end end def File.binread(fname) open(fname, 'rb') {|f| return f.read } end # for corrupted Windows' stat(2) def File.dir?(path) File.directory?((path[-1,1] == '/') ? path : path + '/') end class ConfigTable include Enumerable def initialize(rbconfig) @rbconfig = rbconfig @items = [] @table = {} # options @install_prefix = nil @config_opt = nil @verbose = true @no_harm = false end attr_accessor :install_prefix attr_accessor :config_opt attr_writer :verbose def verbose? @verbose end attr_writer :no_harm def no_harm? @no_harm end def [](key) lookup(key).resolve(self) end def []=(key, val) lookup(key).set val end def names @items.map {|i| i.name } end def each(&block) @items.each(&block) end def key?(name) @table.key?(name) end def lookup(name) @table[name] or setup_rb_error "no such config item: #{name}" end def add(item) @items.push item @table[item.name] = item end def remove(name) item = lookup(name) @items.delete_if {|i| i.name == name } @table.delete_if {|name, i| i.name == name } item end def load_script(path, inst = nil) if File.file?(path) MetaConfigEnvironment.new(self, inst).instance_eval File.read(path), path end end def savefile '.config' end def load_savefile begin File.foreach(savefile()) do |line| k, v = *line.split(/=/, 2) self[k] = v.strip end rescue Errno::ENOENT setup_rb_error $!.message + "\n#{File.basename($0)} config first" end end def save @items.each {|i| i.value } File.open(savefile(), 'w') {|f| @items.each do |i| f.printf "%s=%s\n", i.name, i.value if i.value? and i.value end } end def load_standard_entries standard_entries(@rbconfig).each do |ent| add ent end end def standard_entries(rbconfig) c = rbconfig rubypath = File.join(c['bindir'], c['ruby_install_name'] + c['EXEEXT']) major = c['MAJOR'].to_i minor = c['MINOR'].to_i teeny = c['TEENY'].to_i version = "#{major}.#{minor}" # ruby ver. >= 1.4.4? 
newpath_p = ((major >= 2) or ((major == 1) and ((minor >= 5) or ((minor == 4) and (teeny >= 4))))) if c['rubylibdir'] # V > 1.6.3 libruby = "#{c['prefix']}/lib/ruby" librubyver = c['rubylibdir'] librubyverarch = c['archdir'] siteruby = c['sitedir'] siterubyver = c['sitelibdir'] siterubyverarch = c['sitearchdir'] elsif newpath_p # 1.4.4 <= V <= 1.6.3 libruby = "#{c['prefix']}/lib/ruby" librubyver = "#{c['prefix']}/lib/ruby/#{version}" librubyverarch = "#{c['prefix']}/lib/ruby/#{version}/#{c['arch']}" siteruby = c['sitedir'] siterubyver = "$siteruby/#{version}" siterubyverarch = "$siterubyver/#{c['arch']}" else # V < 1.4.4 libruby = "#{c['prefix']}/lib/ruby" librubyver = "#{c['prefix']}/lib/ruby/#{version}" librubyverarch = "#{c['prefix']}/lib/ruby/#{version}/#{c['arch']}" siteruby = "#{c['prefix']}/lib/ruby/#{version}/site_ruby" siterubyver = siteruby siterubyverarch = "$siterubyver/#{c['arch']}" end parameterize = lambda {|path| path.sub(/\A#{Regexp.quote(c['prefix'])}/, '$prefix') } if arg = c['configure_args'].split.detect {|arg| /--with-make-prog=/ =~ arg } makeprog = arg.sub(/'/, '').split(/=/, 2)[1] else makeprog = 'make' end [ ExecItem.new('installdirs', 'std/site/home', 'std: install under libruby; site: install under site_ruby; home: install under $HOME')\ {|val, table| case val when 'std' table['rbdir'] = '$librubyver' table['sodir'] = '$librubyverarch' when 'site' table['rbdir'] = '$siterubyver' table['sodir'] = '$siterubyverarch' when 'home' setup_rb_error '$HOME was not set' unless ENV['HOME'] table['prefix'] = ENV['HOME'] table['rbdir'] = '$libdir/ruby' table['sodir'] = '$libdir/ruby' end }, PathItem.new('prefix', 'path', c['prefix'], 'path prefix of target environment'), PathItem.new('bindir', 'path', parameterize.call(c['bindir']), 'the directory for commands'), PathItem.new('libdir', 'path', parameterize.call(c['libdir']), 'the directory for libraries'), PathItem.new('datadir', 'path', parameterize.call(c['datadir']), 'the directory for shared data'), PathItem.new('mandir', 'path', parameterize.call(c['mandir']), 'the directory for man pages'), PathItem.new('sysconfdir', 'path', parameterize.call(c['sysconfdir']), 'the directory for system configuration files'), PathItem.new('localstatedir', 'path', parameterize.call(c['localstatedir']), 'the directory for local state data'), PathItem.new('libruby', 'path', libruby, 'the directory for ruby libraries'), PathItem.new('librubyver', 'path', librubyver, 'the directory for standard ruby libraries'), PathItem.new('librubyverarch', 'path', librubyverarch, 'the directory for standard ruby extensions'), PathItem.new('siteruby', 'path', siteruby, 'the directory for version-independent aux ruby libraries'), PathItem.new('siterubyver', 'path', siterubyver, 'the directory for aux ruby libraries'), PathItem.new('siterubyverarch', 'path', siterubyverarch, 'the directory for aux ruby binaries'), PathItem.new('rbdir', 'path', '$siterubyver', 'the directory for ruby scripts'), PathItem.new('sodir', 'path', '$siterubyverarch', 'the directory for ruby extentions'), PathItem.new('rubypath', 'path', rubypath, 'the path to set to #! line'), ProgramItem.new('rubyprog', 'name', rubypath, 'the ruby program using for installation'), ProgramItem.new('makeprog', 'name', makeprog, 'the make program to compile ruby extentions'), SelectItem.new('shebang', 'all/ruby/never', 'ruby', 'shebang line (#!) 
editing mode'), BoolItem.new('without-ext', 'yes/no', 'no', 'does not compile/install ruby extentions') ] end private :standard_entries def load_multipackage_entries multipackage_entries().each do |ent| add ent end end def multipackage_entries [ PackageSelectionItem.new('with', 'name,name...', '', 'ALL', 'package names that you want to install'), PackageSelectionItem.new('without', 'name,name...', '', 'NONE', 'package names that you do not want to install') ] end private :multipackage_entries ALIASES = { 'std-ruby' => 'librubyver', 'stdruby' => 'librubyver', 'rubylibdir' => 'librubyver', 'archdir' => 'librubyverarch', 'site-ruby-common' => 'siteruby', # For backward compatibility 'site-ruby' => 'siterubyver', # For backward compatibility 'bin-dir' => 'bindir', 'bin-dir' => 'bindir', 'rb-dir' => 'rbdir', 'so-dir' => 'sodir', 'data-dir' => 'datadir', 'ruby-path' => 'rubypath', 'ruby-prog' => 'rubyprog', 'ruby' => 'rubyprog', 'make-prog' => 'makeprog', 'make' => 'makeprog' } def fixup ALIASES.each do |ali, name| @table[ali] = @table[name] end @items.freeze @table.freeze @options_re = /\A--(#{@table.keys.join('|')})(?:=(.*))?\z/ end def parse_opt(opt) m = @options_re.match(opt) or setup_rb_error "config: unknown option #{opt}" m.to_a[1,2] end def dllext @rbconfig['DLEXT'] end def value_config?(name) lookup(name).value? end class Item def initialize(name, template, default, desc) @name = name.freeze @template = template @value = default @default = default @description = desc end attr_reader :name attr_reader :description attr_accessor :default alias help_default default def help_opt "--#{@name}=#{@template}" end def value? true end def value @value end def resolve(table) @value.gsub(%r<\$([^/]+)>) { table[$1] } end def set(val) @value = check(val) end private def check(val) setup_rb_error "config: --#{name} requires argument" unless val val end end class BoolItem < Item def config_type 'bool' end def help_opt "--#{@name}" end private def check(val) return 'yes' unless val case val when /\Ay(es)?\z/i, /\At(rue)?\z/i then 'yes' when /\An(o)?\z/i, /\Af(alse)\z/i then 'no' else setup_rb_error "config: --#{@name} accepts only yes/no for argument" end end end class PathItem < Item def config_type 'path' end private def check(path) setup_rb_error "config: --#{@name} requires argument" unless path path[0,1] == '$' ? path : File.expand_path(path) end end class ProgramItem < Item def config_type 'program' end end class SelectItem < Item def initialize(name, selection, default, desc) super @ok = selection.split('/') end def config_type 'select' end private def check(val) unless @ok.include?(val.strip) setup_rb_error "config: use --#{@name}=#{@template} (#{val})" end val.strip end end class ExecItem < Item def initialize(name, selection, desc, &block) super name, selection, nil, desc @ok = selection.split('/') @action = block end def config_type 'exec' end def value? 
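      # ExecItem has no stored value of its own (e.g. the "installdirs" item);
      # it only triggers the supplied block (see #evaluate) which updates other
      # entries in the config table.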
false end def resolve(table) setup_rb_error "$#{name()} wrongly used as option value" end undef set def evaluate(val, table) v = val.strip.downcase unless @ok.include?(v) setup_rb_error "invalid option --#{@name}=#{val} (use #{@template})" end @action.call v, table end end class PackageSelectionItem < Item def initialize(name, template, default, help_default, desc) super name, template, default, desc @help_default = help_default end attr_reader :help_default def config_type 'package' end private def check(val) unless File.dir?("packages/#{val}") setup_rb_error "config: no such package: #{val}" end val end end class MetaConfigEnvironment def initialize(config, installer) @config = config @installer = installer end def config_names @config.names end def config?(name) @config.key?(name) end def bool_config?(name) @config.lookup(name).config_type == 'bool' end def path_config?(name) @config.lookup(name).config_type == 'path' end def value_config?(name) @config.lookup(name).config_type != 'exec' end def add_config(item) @config.add item end def add_bool_config(name, default, desc) @config.add BoolItem.new(name, 'yes/no', default ? 'yes' : 'no', desc) end def add_path_config(name, default, desc) @config.add PathItem.new(name, 'path', default, desc) end def set_config_default(name, default) @config.lookup(name).default = default end def remove_config(name) @config.remove(name) end # For only multipackage def packages raise '[setup.rb fatal] multi-package metaconfig API packages() called for single-package; contact application package vendor' unless @installer @installer.packages end # For only multipackage def declare_packages(list) raise '[setup.rb fatal] multi-package metaconfig API declare_packages() called for single-package; contact application package vendor' unless @installer @installer.packages = list end end end # class ConfigTable # This module requires: #verbose?, #no_harm? module FileOperations def mkdir_p(dirname, prefix = nil) dirname = prefix + File.expand_path(dirname) if prefix $stderr.puts "mkdir -p #{dirname}" if verbose? return if no_harm? # Does not check '/', it's too abnormal. dirs = File.expand_path(dirname).split(%r<(?=/)>) if /\A[a-z]:\z/i =~ dirs[0] disk = dirs.shift dirs[0] = disk + dirs[0] end dirs.each_index do |idx| path = dirs[0..idx].join('') Dir.mkdir path unless File.dir?(path) end end def rm_f(path) $stderr.puts "rm -f #{path}" if verbose? return if no_harm? force_remove_file path end def rm_rf(path) $stderr.puts "rm -rf #{path}" if verbose? return if no_harm? remove_tree path end def remove_tree(path) if File.symlink?(path) remove_file path elsif File.dir?(path) remove_tree0 path else force_remove_file path end end def remove_tree0(path) Dir.foreach(path) do |ent| next if ent == '.' next if ent == '..' entpath = "#{path}/#{ent}" if File.symlink?(entpath) remove_file entpath elsif File.dir?(entpath) remove_tree0 entpath else force_remove_file entpath end end begin Dir.rmdir path rescue Errno::ENOTEMPTY # directory may not be empty end end def move_file(src, dest) force_remove_file dest begin File.rename src, dest rescue File.open(dest, 'wb') {|f| f.write File.binread(src) } File.chmod File.stat(src).mode, dest File.unlink src end end def force_remove_file(path) begin remove_file path rescue end end def remove_file(path) File.chmod 0777, path File.unlink path end def install(from, dest, mode, prefix = nil) $stderr.puts "install #{from} #{dest}" if verbose? return if no_harm? realdest = prefix ? 
prefix + File.expand_path(dest) : dest realdest = File.join(realdest, File.basename(from)) if File.dir?(realdest) str = File.binread(from) if diff?(str, realdest) verbose_off { rm_f realdest if File.exist?(realdest) } File.open(realdest, 'wb') {|f| f.write str } File.chmod mode, realdest File.open("#{objdir_root()}/InstalledFiles", 'a') {|f| if prefix f.puts realdest.sub(prefix, '') else f.puts realdest end } end end def diff?(new_content, path) return true unless File.exist?(path) new_content != File.binread(path) end def command(*args) $stderr.puts args.join(' ') if verbose? system(*args) or raise RuntimeError, "system(#{args.map{|a| a.inspect }.join(' ')}) failed" end def ruby(*args) command config('rubyprog'), *args end def make(task = nil) command(*[config('makeprog'), task].compact) end def extdir?(dir) File.exist?("#{dir}/MANIFEST") or File.exist?("#{dir}/extconf.rb") end def files_of(dir) Dir.open(dir) {|d| return d.select {|ent| File.file?("#{dir}/#{ent}") } } end DIR_REJECT = %w( . .. CVS SCCS RCS CVS.adm .svn ) def directories_of(dir) Dir.open(dir) {|d| return d.select {|ent| File.dir?("#{dir}/#{ent}") } - DIR_REJECT } end end # This module requires: #srcdir_root, #objdir_root, #relpath module HookScriptAPI def get_config(key) @config[key] end alias config get_config # obsolete: use metaconfig to change configuration def set_config(key, val) @config[key] = val end # # srcdir/objdir (works only in the package directory) # def curr_srcdir "#{srcdir_root()}/#{relpath()}" end def curr_objdir "#{objdir_root()}/#{relpath()}" end def srcfile(path) "#{curr_srcdir()}/#{path}" end def srcexist?(path) File.exist?(srcfile(path)) end def srcdirectory?(path) File.dir?(srcfile(path)) end def srcfile?(path) File.file?(srcfile(path)) end def srcentries(path = '.') Dir.open("#{curr_srcdir()}/#{path}") {|d| return d.to_a - %w(. ..) } end def srcfiles(path = '.') srcentries(path).select {|fname| File.file?(File.join(curr_srcdir(), path, fname)) } end def srcdirectories(path = '.') srcentries(path).select {|fname| File.dir?(File.join(curr_srcdir(), path, fname)) } end end class ToplevelInstaller Version = '3.4.1' Copyright = 'Copyright (c) 2000-2005 Minero Aoki' TASKS = [ [ 'all', 'do config, setup, then install' ], [ 'config', 'saves your configurations' ], [ 'show', 'shows current configuration' ], [ 'setup', 'compiles ruby extentions and others' ], [ 'install', 'installs files' ], [ 'test', 'run all tests in test/' ], [ 'clean', "does `make clean' for each extention" ], [ 'distclean',"does `make distclean' for each extention" ] ] def ToplevelInstaller.invoke config = ConfigTable.new(load_rbconfig()) config.load_standard_entries config.load_multipackage_entries if multipackage? config.fixup klass = (multipackage?() ? ToplevelInstallerMulti : ToplevelInstaller) klass.new(File.dirname($0), config).invoke end def ToplevelInstaller.multipackage? 
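    # a multi-package source tree (handled by ToplevelInstallerMulti) ships a
    # packages/ directory next to this setup.rb ($0)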
File.dir?(File.dirname($0) + '/packages') end def ToplevelInstaller.load_rbconfig if arg = ARGV.detect {|arg| /\A--rbconfig=/ =~ arg } ARGV.delete(arg) load File.expand_path(arg.split(/=/, 2)[1]) $".push 'rbconfig.rb' else require 'rbconfig' end ::Config::CONFIG end def initialize(ardir_root, config) @ardir = File.expand_path(ardir_root) @config = config # cache @valid_task_re = nil end def config(key) @config[key] end def inspect "#<#{self.class} #{__id__()}>" end def invoke run_metaconfigs case task = parsearg_global() when nil, 'all' parsearg_config init_installers exec_config exec_setup exec_install else case task when 'config', 'test' ; when 'clean', 'distclean' @config.load_savefile if File.exist?(@config.savefile) else @config.load_savefile end __send__ "parsearg_#{task}" init_installers __send__ "exec_#{task}" end end def run_metaconfigs @config.load_script "#{@ardir}/metaconfig" end def init_installers @installer = Installer.new(@config, @ardir, File.expand_path('.')) end # # Hook Script API bases # def srcdir_root @ardir end def objdir_root '.' end def relpath '.' end # # Option Parsing # def parsearg_global while arg = ARGV.shift case arg when /\A\w+\z/ setup_rb_error "invalid task: #{arg}" unless valid_task?(arg) return arg when '-q', '--quiet' @config.verbose = false when '--verbose' @config.verbose = true when '--help' print_usage $stdout exit 0 when '--version' puts "#{File.basename($0)} version #{Version}" exit 0 when '--copyright' puts Copyright exit 0 else setup_rb_error "unknown global option '#{arg}'" end end nil end def valid_task?(t) valid_task_re() =~ t end def valid_task_re @valid_task_re ||= /\A(?:#{TASKS.map {|task,desc| task }.join('|')})\z/ end def parsearg_no_options unless ARGV.empty? task = caller(0).first.slice(%r<`parsearg_(\w+)'>, 1) setup_rb_error "#{task}: unknown options: #{ARGV.join(' ')}" end end alias parsearg_show parsearg_no_options alias parsearg_setup parsearg_no_options alias parsearg_test parsearg_no_options alias parsearg_clean parsearg_no_options alias parsearg_distclean parsearg_no_options def parsearg_config evalopt = [] set = [] @config.config_opt = [] while i = ARGV.shift if /\A--?\z/ =~ i @config.config_opt = ARGV.dup break end name, value = *@config.parse_opt(i) if @config.value_config?(name) @config[name] = value else evalopt.push [name, value] end set.push name end evalopt.each do |name, value| @config.lookup(name).evaluate value, @config end # Check if configuration is valid set.each do |n| @config[n] if @config.value_config?(n) end end def parsearg_install @config.no_harm = false @config.install_prefix = '' while a = ARGV.shift case a when '--no-harm' @config.no_harm = true when /\A--prefix=/ path = a.split(/=/, 2)[1] path = File.expand_path(path) unless path[0,1] == '/' @config.install_prefix = path else setup_rb_error "install: unknown option #{a}" end end end def print_usage(out) out.puts 'Typical Installation Procedure:' out.puts " $ ruby #{File.basename $0} config" out.puts " $ ruby #{File.basename $0} setup" out.puts " # ruby #{File.basename $0} install (may require root privilege)" out.puts out.puts 'Detailed Usage:' out.puts " ruby #{File.basename $0} " out.puts " ruby #{File.basename $0} [] []" fmt = " %-24s %s\n" out.puts out.puts 'Global options:' out.printf fmt, '-q,--quiet', 'suppress message outputs' out.printf fmt, ' --verbose', 'output messages verbosely' out.printf fmt, ' --help', 'print this message' out.printf fmt, ' --version', 'print version and quit' out.printf fmt, ' --copyright', 'print copyright and quit' 
out.puts out.puts 'Tasks:' TASKS.each do |name, desc| out.printf fmt, name, desc end fmt = " %-24s %s [%s]\n" out.puts out.puts 'Options for CONFIG or ALL:' @config.each do |item| out.printf fmt, item.help_opt, item.description, item.help_default end out.printf fmt, '--rbconfig=path', 'rbconfig.rb to load',"running ruby's" out.puts out.puts 'Options for INSTALL:' out.printf fmt, '--no-harm', 'only display what to do if given', 'off' out.printf fmt, '--prefix=path', 'install path prefix', '' out.puts end # # Task Handlers # def exec_config @installer.exec_config @config.save # must be final end def exec_setup @installer.exec_setup end def exec_install @installer.exec_install end def exec_test @installer.exec_test end def exec_show @config.each do |i| printf "%-20s %s\n", i.name, i.value if i.value? end end def exec_clean @installer.exec_clean end def exec_distclean @installer.exec_distclean end end # class ToplevelInstaller class ToplevelInstallerMulti < ToplevelInstaller include FileOperations def initialize(ardir_root, config) super @packages = directories_of("#{@ardir}/packages") raise 'no package exists' if @packages.empty? @root_installer = Installer.new(@config, @ardir, File.expand_path('.')) end def run_metaconfigs @config.load_script "#{@ardir}/metaconfig", self @packages.each do |name| @config.load_script "#{@ardir}/packages/#{name}/metaconfig" end end attr_reader :packages def packages=(list) raise 'package list is empty' if list.empty? list.each do |name| raise "directory packages/#{name} does not exist"\ unless File.dir?("#{@ardir}/packages/#{name}") end @packages = list end def init_installers @installers = {} @packages.each do |pack| @installers[pack] = Installer.new(@config, "#{@ardir}/packages/#{pack}", "packages/#{pack}") end with = extract_selection(config('with')) without = extract_selection(config('without')) @selected = @installers.keys.select {|name| (with.empty? or with.include?(name)) \ and not without.include?(name) } end def extract_selection(list) a = list.split(/,/) a.each do |name| setup_rb_error "no such package: #{name}" unless @installers.key?(name) end a end def print_usage(f) super f.puts 'Inluded packages:' f.puts ' ' + @packages.sort.join(' ') f.puts end # # Task Handlers # def exec_config run_hook 'pre-config' each_selected_installers {|inst| inst.exec_config } run_hook 'post-config' @config.save # must be final end def exec_setup run_hook 'pre-setup' each_selected_installers {|inst| inst.exec_setup } run_hook 'post-setup' end def exec_install run_hook 'pre-install' each_selected_installers {|inst| inst.exec_install } run_hook 'post-install' end def exec_test run_hook 'pre-test' each_selected_installers {|inst| inst.exec_test } run_hook 'post-test' end def exec_clean rm_f @config.savefile run_hook 'pre-clean' each_selected_installers {|inst| inst.exec_clean } run_hook 'post-clean' end def exec_distclean rm_f @config.savefile run_hook 'pre-distclean' each_selected_installers {|inst| inst.exec_distclean } run_hook 'post-distclean' end # # lib # def each_selected_installers Dir.mkdir 'packages' unless File.dir?('packages') @selected.each do |pack| $stderr.puts "Processing the package `#{pack}' ..." if verbose? Dir.mkdir "packages/#{pack}" unless File.dir?("packages/#{pack}") Dir.chdir "packages/#{pack}" yield @installers[pack] Dir.chdir '../..' end end def run_hook(id) @root_installer.run_hook id end # module FileOperations requires this def verbose? @config.verbose? end # module FileOperations requires this def no_harm? @config.no_harm? 
end end # class ToplevelInstallerMulti class Installer FILETYPES = %w( bin lib ext data conf man ) include FileOperations include HookScriptAPI def initialize(config, srcroot, objroot) @config = config @srcdir = File.expand_path(srcroot) @objdir = File.expand_path(objroot) @currdir = '.' end def inspect "#<#{self.class} #{File.basename(@srcdir)}>" end def noop(rel) end # # Hook Script API base methods # def srcdir_root @srcdir end def objdir_root @objdir end def relpath @currdir end # # Config Access # # module FileOperations requires this def verbose? @config.verbose? end # module FileOperations requires this def no_harm? @config.no_harm? end def verbose_off begin save, @config.verbose = @config.verbose?, false yield ensure @config.verbose = save end end # # TASK config # def exec_config exec_task_traverse 'config' end alias config_dir_bin noop alias config_dir_lib noop def config_dir_ext(rel) extconf if extdir?(curr_srcdir()) end alias config_dir_data noop alias config_dir_conf noop alias config_dir_man noop def extconf ruby "#{curr_srcdir()}/extconf.rb", *@config.config_opt end # # TASK setup # def exec_setup exec_task_traverse 'setup' end def setup_dir_bin(rel) files_of(curr_srcdir()).each do |fname| update_shebang_line "#{curr_srcdir()}/#{fname}" end end alias setup_dir_lib noop def setup_dir_ext(rel) make if extdir?(curr_srcdir()) end alias setup_dir_data noop alias setup_dir_conf noop alias setup_dir_man noop def update_shebang_line(path) return if no_harm? return if config('shebang') == 'never' old = Shebang.load(path) if old $stderr.puts "warning: #{path}: Shebang line includes too many args. It is not portable and your program may not work." if old.args.size > 1 new = new_shebang(old) return if new.to_s == old.to_s else return unless config('shebang') == 'all' new = Shebang.new(config('rubypath')) end $stderr.puts "updating shebang: #{File.basename(path)}" if verbose? open_atomic_writer(path) {|output| File.open(path, 'rb') {|f| f.gets if old # discard output.puts new.to_s output.print f.read } } end def new_shebang(old) if /\Aruby/ =~ File.basename(old.cmd) Shebang.new(config('rubypath'), old.args) elsif File.basename(old.cmd) == 'env' and old.args.first == 'ruby' Shebang.new(config('rubypath'), old.args[1..-1]) else return old unless config('shebang') == 'all' Shebang.new(config('rubypath')) end end def open_atomic_writer(path, &block) tmpfile = File.basename(path) + '.tmp' begin File.open(tmpfile, 'wb', &block) File.rename tmpfile, File.basename(path) ensure File.unlink tmpfile if File.exist?(tmpfile) end end class Shebang def Shebang.load(path) line = nil File.open(path) {|f| line = f.gets } return nil unless /\A#!/ =~ line parse(line) end def Shebang.parse(line) cmd, *args = *line.strip.sub(/\A\#!/, '').split(' ') new(cmd, args) end def initialize(cmd, args = []) @cmd = cmd @args = args end attr_reader :cmd attr_reader :args def to_s "#! #{@cmd}" + (@args.empty? ? 
'' : " #{@args.join(' ')}") end end # # TASK install # def exec_install rm_f 'InstalledFiles' exec_task_traverse 'install' end def install_dir_bin(rel) install_files targetfiles(), "#{config('bindir')}/#{rel}", 0755 end def install_dir_lib(rel) install_files libfiles(), "#{config('rbdir')}/#{rel}", 0644 end def install_dir_ext(rel) return unless extdir?(curr_srcdir()) install_files rubyextentions('.'), "#{config('sodir')}/#{File.dirname(rel)}", 0555 end def install_dir_data(rel) install_files targetfiles(), "#{config('datadir')}/#{rel}", 0644 end def install_dir_conf(rel) # FIXME: should not remove current config files # (rename previous file to .old/.org) install_files targetfiles(), "#{config('sysconfdir')}/#{rel}", 0644 end def install_dir_man(rel) install_files targetfiles(), "#{config('mandir')}/#{rel}", 0644 end def install_files(list, dest, mode) mkdir_p dest, @config.install_prefix list.each do |fname| install fname, dest, mode, @config.install_prefix end end def libfiles glob_reject(%w(*.y *.output), targetfiles()) end def rubyextentions(dir) ents = glob_select("*.#{@config.dllext}", targetfiles()) if ents.empty? setup_rb_error "no ruby extention exists: 'ruby #{$0} setup' first" end ents end def targetfiles mapdir(existfiles() - hookfiles()) end def mapdir(ents) ents.map {|ent| if File.exist?(ent) then ent # objdir else "#{curr_srcdir()}/#{ent}" # srcdir end } end # picked up many entries from cvs-1.11.1/src/ignore.c JUNK_FILES = %w( core RCSLOG tags TAGS .make.state .nse_depinfo #* .#* cvslog.* ,* .del-* *.olb *~ *.old *.bak *.BAK *.orig *.rej _$* *$ *.org *.in .* ) def existfiles glob_reject(JUNK_FILES, (files_of(curr_srcdir()) | files_of('.'))) end def hookfiles %w( pre-%s post-%s pre-%s.rb post-%s.rb ).map {|fmt| %w( config setup install clean ).map {|t| sprintf(fmt, t) } }.flatten end def glob_select(pat, ents) re = globs2re([pat]) ents.select {|ent| re =~ ent } end def glob_reject(pats, ents) re = globs2re(pats) ents.reject {|ent| re =~ ent } end GLOB2REGEX = { '.' => '\.', '$' => '\$', '#' => '\#', '*' => '.*' } def globs2re(pats) /\A(?:#{ pats.map {|pat| pat.gsub(/[\.\$\#\*]/) {|ch| GLOB2REGEX[ch] } }.join('|') })\z/ end # # TASK test # TESTDIR = 'test' def exec_test unless File.directory?('test') $stderr.puts 'no test in this package' if verbose? return end $stderr.puts 'Running tests...' if verbose? begin require 'test/unit' rescue LoadError setup_rb_error 'test/unit cannot loaded. You need Ruby 1.8 or later to invoke this task.' end runner = Test::Unit::AutoRunner.new(true) runner.to_run << TESTDIR runner.run end # # TASK clean # def exec_clean exec_task_traverse 'clean' rm_f @config.savefile rm_f 'InstalledFiles' end alias clean_dir_bin noop alias clean_dir_lib noop alias clean_dir_data noop alias clean_dir_conf noop alias clean_dir_man noop def clean_dir_ext(rel) return unless extdir?(curr_srcdir()) make 'clean' if File.file?('Makefile') end # # TASK distclean # def exec_distclean exec_task_traverse 'distclean' rm_f @config.savefile rm_f 'InstalledFiles' end alias distclean_dir_bin noop alias distclean_dir_lib noop def distclean_dir_ext(rel) return unless extdir?(curr_srcdir()) make 'distclean' if File.file?('Makefile') end alias distclean_dir_data noop alias distclean_dir_conf noop alias distclean_dir_man noop # # Traversing # def exec_task_traverse(task) run_hook "pre-#{task}" FILETYPES.each do |type| if type == 'ext' and config('without-ext') == 'yes' $stderr.puts 'skipping ext/* by user option' if verbose? 
next end traverse task, type, "#{task}_dir_#{type}" end run_hook "post-#{task}" end def traverse(task, rel, mid) dive_into(rel) { run_hook "pre-#{task}" __send__ mid, rel.sub(%r[\A.*?(?:/|\z)], '') directories_of(curr_srcdir()).each do |d| traverse task, "#{rel}/#{d}", mid end run_hook "post-#{task}" } end def dive_into(rel) return unless File.dir?("#{@srcdir}/#{rel}") dir = File.basename(rel) Dir.mkdir dir unless File.dir?(dir) prevdir = Dir.pwd Dir.chdir dir $stderr.puts '---> ' + rel if verbose? @currdir = rel yield Dir.chdir prevdir $stderr.puts '<--- ' + rel if verbose? @currdir = File.dirname(rel) end def run_hook(id) path = [ "#{curr_srcdir()}/#{id}", "#{curr_srcdir()}/#{id}.rb" ].detect {|cand| File.file?(cand) } return unless path begin instance_eval File.read(path), path, 1 rescue raise if $DEBUG setup_rb_error "hook #{path} failed:\n" + $!.message end end end # class Installer class SetupError < StandardError; end def setup_rb_error(msg) raise SetupError, msg end if $0 == __FILE__ begin ToplevelInstaller.invoke rescue SetupError raise if $DEBUG $stderr.puts $!.message $stderr.puts "Try 'ruby #{$0} --help' for detailed usage." exit 1 end end rainbows-5.0.0/Summary0000644000004100000410000000002312641135250014742 0ustar www-datawww-dataolddoc_placeholder rainbows-5.0.0/DEPLOY0000644000004100000410000000311512641135250014306 0ustar www-datawww-data= Deploying \Rainbows! == \Rainbows! only For the daring, you should consider deploying \Rainbows! in a standalone configuration. This will be more highly recommended as \Rainbows! stabilizes, especially if static file performance improves (or you don't need them). You will need to do this to support things like BOSH or do real-time processing of the request body as it is being uploaded. In this case, haproxy or any similar (non-request-body-buffering) load balancer should be used to balance requests between different machines. == nginx proxying to \Rainbows! or unicorn For high-traffic applications, routing slow actions to \Rainbows! with nginx is recommended as nginx can serve static files faster and nginx can forward fast actions to unicorn. static files | nginx |--> slow actions --> Rainbows! | `--> fast actions --> unicorn Be sure to set proxy_buffering off in nginx for "slow actions" if you have Comet applications (but not for unicorn). == Denial-of-Service Concerns Since \Rainbows! is designed to talk to slow clients with long-held connections, it may be subject to brute force denial-of-service attacks. In unicorn and Mongrel, we've already enabled the "httpready" accept filter for FreeBSD and the TCP_DEFER_ACCEPT option in Linux; but it is still possible to build clients that work around and fool these mechanisms. \Rainbows! itself does not feature any explicit protection against brute force denial-of-service attacks. We believe this is best handled by dedicated firewalls provided by the operating system. rainbows-5.0.0/TODO0000644000004100000410000000061712641135250014063 0ustar www-datawww-data= TODO items for Rainbows! We're lazy and pick the easy items to do first, then the ones people care about. * investigate non-Rack frameworks (e.g. Goliath) * documentation improvements * Improve test suite coverage. We won't waste cycles with puny unit tests, only integration tests that exercise externally visible parts. 
* test and improve performance (throughput/latency/memory usage) rainbows-5.0.0/Documentation/0000755000004100000410000000000012641135250016200 5ustar www-datawww-datarainbows-5.0.0/Documentation/GNUmakefile0000644000004100000410000000123012641135250020246 0ustar www-datawww-dataall:: PANDOC = pandoc PANDOC_OPTS = -f markdown --email-obfuscation=none pandoc = $(PANDOC) $(PANDOC_OPTS) pandoc_html = $(pandoc) --toc -t html --no-wrap man1 := $(addsuffix .1,rainbows) html1 := $(addsuffix .html,$(man1)) all:: html man html: $(html1) man: $(man1) install-html: html mkdir -p ../doc/man1 install -m 644 $(html1) ../doc/man1 install-man: man mkdir -p ../man/man1 install -m 644 $(man1) ../man/man1 %.1: %.1.txt $(pandoc) -s -t man < $< > $@+ && mv $@+ $@ %.1.html: %.1.txt $(pandoc_html) < $< > $@+ && mv $@+ $@ comparison.html: comparison.haml haml -t ugly < $< > $@+ && mv $@+ $@ clean:: $(RM) $(man1) $(html1) comparison.html rainbows-5.0.0/Documentation/.gitignore0000644000004100000410000000003012641135250020161 0ustar www-datawww-data*.1 *.5 *.7 *.gz *.html rainbows-5.0.0/Documentation/rainbows.1.txt0000644000004100000410000001366712641135250020741 0ustar www-datawww-data% rainbows(1) Rainbows! User Manual % Rainbows! Hackers % December 3, 2009 # NAME rainbows - rackup-like command to launch Rainbows! # SYNOPSIS rainbows [-c CONFIG_FILE] [-E RACK_ENV] [-D] [RACKUP_FILE] # DESCRIPTION A rackup(1)-like command to launch Rack applications using Rainbows!. It is expected to be started in your application root (APP_ROOT), but the "working_directory" directive may be used in the CONFIG_FILE. While Rainbows! takes a myriad of command-line options for compatibility with ruby(1) and rackup(1), it is recommended to stick to the few command-line options specified in the SYNOPSIS and use the CONFIG_FILE as much as possible. # RACKUP FILE This defaults to \"config.ru\" in APP_ROOT. It should be the same file used by rackup(1) and other Rack launchers, it uses the *Rack::Builder* DSL. Embedded command-line options are mostly parsed for compatibility with rackup(1) but strongly discouraged. # UNICORN OPTIONS -c, \--config-file CONFIG_FILE : Path to the Unicorn-specific config file. The config file is implemented as a Ruby DSL, so Ruby code may executed. See the RDoc/ri for the *Unicorn::Configurator* class for the full list of directives available from the DSL. -D, \--daemonize : Run daemonized in the background. The process is detached from the controlling terminal and stdin is redirected to "/dev/null". Unlike many common UNIX daemons, we do not chdir to \"/\" upon daemonization to allow more control over the startup/upgrade process. Unless specified in the CONFIG_FILE, stderr and stdout will also be redirected to "/dev/null". -E, \--env RACK_ENV : Run under the given RACK_ENV. See the RACK ENVIRONMENT section for more details. -l, \--listen ADDRESS : Listens on a given ADDRESS. ADDRESS may be in the form of HOST:PORT or PATH, HOST:PORT is taken to mean a TCP socket and PATH is meant to be a path to a UNIX domain socket. Defaults to "0.0.0.0:8080" (all addresses on TCP port 8080) For production deployments, specifying the "listen" directive in CONFIG_FILE is recommended as it allows fine-tuning of socket options. -N, \--no-default-middleware : Disables loading middleware implied by RACK_ENV. This bypasses the configuration documented in the RACK ENVIRONMENT section, but still allows RACK_ENV to be used for application/framework-specific purposes. 
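As a purely illustrative example (the file name and directive values here
are hypothetical, not recommendations), a small CONFIG_FILE selecting a
Rainbows! concurrency model might contain:

    # rainbows.conf.rb (example values only)
    Rainbows! do
      use :ThreadPool            # concurrency model
      worker_connections 100     # max clients per worker process
    end
    worker_processes 4           # Unicorn::Configurator directive
    listen "0.0.0.0:8080"

Such a file would be loaded with "rainbows -c rainbows.conf.rb"; see the
*Rainbows::Configurator* and *Unicorn::Configurator* RDoc for the full set
of directives.
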
# RACKUP COMPATIBILITY OPTIONS -o, \--host HOST : Listen on a TCP socket belonging to HOST, default is "0.0.0.0" (all addresses). If specified multiple times on the command-line, only the last-specified value takes effect. This option only exists for compatibility with the rackup(1) command, use of "-l"/"\--listen" switch is recommended instead. -p, \--port PORT : Listen on the specified TCP PORT, default is 8080. If specified multiple times on the command-line, only the last-specified value takes effect. This option only exists for compatibility with the rackup(1) command, use of "-l"/"\--listen" switch is recommended instead. -s, \--server SERVER : No-op, this exists only for compatibility with rackup(1). # RUBY OPTIONS -e, \--eval LINE : Evaluate a LINE of Ruby code. This evaluation happens immediately as the command-line is being parsed. -d, \--debug : Turn on debug mode, the $DEBUG variable is set to true. -w, \--warn : Turn on verbose warnings, the $VERBOSE variable is set to true. -I, \--include PATH : specify $LOAD_PATH. PATH will be prepended to $LOAD_PATH. The \':\' character may be used to delimit multiple directories. This directive may be used more than once. Modifications to $LOAD_PATH take place immediately and in the order they were specified on the command-line. -r, \--require LIBRARY : require a specified LIBRARY before executing the application. The \"require\" statement will be executed immediately and in the order they were specified on the command-line. # SIGNALS The following UNIX signals may be sent to the master process: * HUP - reload config file, app, and gracefully restart all workers * INT/TERM - quick shutdown, kills all workers immediately * QUIT - graceful shutdown, waits for workers to finish their current request before finishing. * USR1 - reopen all logs owned by the master and all workers See Unicorn::Util.reopen_logs for what is considered a log. * USR2 - reexecute the running binary. A separate QUIT should be sent to the original process once the child is verified to be up and running. * WINCH - gracefully stops workers but keep the master running. This will only work for daemonized processes. * TTIN - increment the number of worker processes by one * TTOU - decrement the number of worker processes by one See the [SIGNALS][4] document for full description of all signals used by Rainbows!. # RACK ENVIRONMENT Accepted values of RACK_ENV and the middleware they automatically load (outside of RACKUP_FILE) are exactly as those in rackup(1): * development - loads Rack::CommonLogger, Rack::ShowExceptions, and Rack::Lint middleware * deployment - loads Rack::CommonLogger middleware * none - loads no middleware at all, relying entirely on RACKUP_FILE All unrecognized values for RACK_ENV are assumed to be "none". Production deployments are strongly encouraged to use "deployment" or "none" for maximum performance. Note the Rack::ContentLength and Rack::Chunked middlewares are also loaded by "deployment" and "development", but no other values of RACK_ENV. If needed, they must be individually specified in the RACKUP_FILE, some frameworks do not require them. # SEE ALSO * unicorn(1) * *Rack::Builder* ri/RDoc * *Unicorn::Configurator* ri/RDoc * [Rainbows! 
RDoc][1] * [Rack RDoc][2] * [Rackup HowTo][3] [1]: http://rainbows.bogomips.org/ [2]: http://rdoc.info/gems/r#/gems/rack/frames [3]: http://wiki.github.com/rack/rack/tutorial-rackup-howto [4]: http://rainbows.bogomips.org/SIGNALS.html rainbows-5.0.0/Documentation/comparison.haml0000644000004100000410000002415612641135250021225 0ustar www-datawww-data%h1 Rainbows! at a glance %p Confused by all the options we give you? So are we! Here's some tables to help keep your head straight. Remember, engineering is all about trade-offs. %h2 core features and compatibility %br %table.comp %tr.comp_header %th.mod module %th.tee rack.input streaming %th.r18 Ruby 1.8 %th.r19 Ruby 1.9 %th.rbx Rubinius %th.slow slow clients %tr.comp_base %td.mod Unicorn/Base %td.tee Yes %td.r18 Yes %td.r19 Yes %td.rbx Yes %td.slow No %tr.comp_row %td.mod Revactor %td.tee Yes %td.r18 No %td.r19 Yes %td.rbx No %td.slow Yes %tr.comp_row %td.mod ThreadPool %td.tee Yes %td.r18 Yes %td.r19 Yes %td.rbx Yes %td.slow Yes %tr.comp_row %td.mod Coolio %td.tee No %td.r18 Yes %td.r19 Yes %td.rbx No %td.slow Yes %tr.comp_row %td.mod ThreadSpawn %td.tee Yes %td.r18 Yes %td.r19 Yes %td.rbx Yes %td.slow Yes %tr.comp_row %td.mod EventMachine %td.tee No %td.r18 Yes %td.r19 Yes %td.rbx No %td.slow Yes %tr.comp_row %td.mod CoolioThreadSpawn %td.tee No %td.r18 No %td.r19 Yes %td.rbx No %td.slow Yes %tr.comp_row %td.mod FiberSpawn %td.tee Yes %td.r18 No %td.r19 Yes %td.rbx No %td.slow Yes %tr.comp_row %td.mod FiberPool %td.tee Yes %td.r18 No %td.r19 Yes %td.rbx No %td.slow Yes %tr.comp_base %td.mod ActorSpawn %td.tee Yes %td.r18 Not yet %td.r19 No %td.rbx Yes %td.slow Yes %tr.comp_base %td.mod NeverBlock %td.tee No %td.r18 Yes %td.r19 Yes %td.rbx No %td.slow Yes %tr.comp_row %td.mod CoolioThreadPool %td.tee No %td.r18 Yes %td.r19 No %td.rbx No %td.slow Yes %tr.comp_row %td.mod CoolioFiberSpawn %td.tee Yes %td.r18 No %td.r19 Yes %td.rbx No %td.slow Yes %tr.comp_row %td.mod WriterThreadPool %td.tee Yes %td.r18 Yes %td.r19 Yes %td.rbx Yes %td.slow no %tr.comp_row %td.mod WriterThreadSpawn %td.tee Yes %td.r18 Yes %td.r19 Yes %td.rbx Yes %td.slow no %tr.comp_row %td.mod Epoll %td.tee no %td.r18 Yes %td.r19 Yes %td.rbx Yes %td.slow Yes %tr.comp_row %td.mod XEpoll %td.tee no %td.r18 Yes %td.r19 Yes %td.rbx Yes %td.slow Yes %tr.comp_row %td.mod XEpollThreadSpawn %td.tee Yes %td.r18 Yes %td.r19 Yes %td.rbx Yes %td.slow Yes %tr.comp_row %td.mod XEpollThreadPool %td.tee Yes %td.r18 Yes %td.r19 Yes %td.rbx Yes %td.slow Yes %ul %li Cool.io should also work with Rubinius (though we haven't had time to test). %li CoolioThread* and CoolioThread* requires Ruby 1.9 reasonable performance %li rack.input streaming is what makes %a(href="http://upr.bogomips.org/") upload progress, and BOSH possible %li rack.input streaming is NOT compatible with current versions of nginx or any proxy that fully buffers request bodies before proxying. Keep in mind request body buffering in nginx is a good thing in all other cases where rack.input streaming is not needed. 
%h2 application requirements %br %table.comp %tr.comp_header %th.mod module %th.slowio slow I/O (backend, not client) %th.thr thread safety %th.reent single thread reentrant %tr.comp_base %td.mod Unicorn/Base %td.slowio avoid %td.thr No %td.reent No %tr.comp_row %td.mod Revactor %td.slowio %a(href="http://coolio.github.com/")Coolio, %a(href="http://revactor.org/")Revactor, %b not %a(href="Rainbows/Fiber/IO.html")Fiber::IO %td.thr No %td.reent Yes %tr.comp_row %td.mod ThreadPool %td.slowio thread-safe Ruby %td.thr Yes %td.reent No %tr.comp_row %td.mod Coolio %td.slowio %a(href="http://coolio.github.com/") Coolio %td.thr No %td.reent No %tr.comp_row %td.mod ThreadSpawn %td.slowio thread-safe Ruby %td.thr Yes %td.reent No %tr.comp_row %td.mod EventMachine %td.slowio %a(href="http://rubyeventmachine.com") EventMachine %td.thr No %td.reent No %tr.comp_row %td.mod CoolioThreadSpawn %td.slowio thread-safe Ruby, %a(href="http://coolio.github.com/") Coolio %td.thr Yes %td.reent No %tr.comp_row %td.mod FiberSpawn %td.slowio %a(href="Rainbows/Fiber/IO.html") Rainbows::Fiber::IO %td.thr No %td.reent Yes %tr.comp_row %td.mod FiberPool %td.slowio %a(href="Rainbows/Fiber/IO.html") Rainbows::Fiber::IO %td.thr No %td.reent Yes %tr.comp_base %td.mod ActorSpawn %td.slowio thread-safe Ruby %td.thr Yes %td.reent Yes %tr.comp_base %td.mod NeverBlock %td.slowio %a(href="http://www.espace.com.eg/neverblock") NeverBlock, %a(href="http://rubyeventmachine.com") EventMachine %td.thr No %td.reent Yes %tr.comp_row %td.mod CoolioThreadPool %td.slowio thread-safe Ruby, %a(href="http://coolio.github.com/") Coolio %td.thr Yes %td.reent No %tr.comp_row %td.mod CoolioFiberSpawn %td.slowio %a(href="Rainbows/Fiber/IO.html") Rainbows::Fiber::IO %td.thr No %td.reent Yes %tr.comp_base %td.mod WriterThreadPool %td.slowio avoid %td.thr Maybe %td.reent Maybe %tr.comp_base %td.mod WriterThreadSpawn %td.slowio avoid %td.thr Maybe %td.reent Maybe %tr.comp_base %td.mod Epoll %td.slowio No %td.thr No %td.reent No %tr.comp_base %td.mod XEpoll %td.slowio No %td.thr No %td.reent No %tr.comp_base %td.mod XEpollThreadSpawn %td.slowio thread-safe Ruby %td.thr Yes %td.reent No %tr.comp_base %td.mod XEpollThreadPool %td.slowio thread-safe Ruby %td.thr Yes %td.reent No %ul %li Requirements for single thread reentrancy are loose in that there is no risk of race conditions and potentially mutually exclusive to thread-safety. In the case where a Fiber yields while holding a resource and another Fiber attempting to acquire it may raise an error or worse, deadlock the entire process. %li Slow I/O means anything that can block/stall on sockets including 3rd-party APIs (OpenID providers included) or slow database queries. Properly run Memcached (within the same LAN) is fast and not a blocker. Slow I/O on POSIX filesystems only includes a few operations, namely on UNIX domain sockets and named pipes. Nearly all other operations on POSIX filesystems can be considered "fast", or at least uninterruptible. %li WriterThread{Pool,Spawn} will require thread safety if your response body is dynamically generated during the body#each call. 
%h2 middlewares and frameworks %br %table.comp %tr.comp_header %th.mod model %th.devfd %a(href="Rainbows/DevFdResponse.html") DevFdResponse %th.app_pool %a(href="Rainbows/AppPool.html") AppPool %th.lock %a(href="http://rdoc.info/gems/rack/Rack/Lock") Rack::Lock %th.async async %tr.comp_row %td.mod Unicorn/Base %td.devfd no-op %td.app_pool no-op %td.lock no-op %td.async lots of RAM :P %tr.comp_row %td.mod Revactor %td.devfd no-op %td.app_pool Yes %td.lock No! %td.async Revactor itself %tr.comp_row %td.mod ThreadPool %td.devfd Yes %td.app_pool Yes %td.lock Yes %td.async thread-safe Ruby %tr.comp_row %td.mod Coolio %td.devfd Yes %td.app_pool no-op %td.lock no-op %td.async DevFdResponse %tr.comp_row %td.mod ThreadSpawn %td.devfd Yes %td.app_pool Yes %td.lock Yes %td.async thread-safe Ruby %tr.comp_row %td.mod EventMachine %td.devfd Yes %td.app_pool no-op %td.lock no-op %td.async async_sinatra, Cramp, rack-fiber_pool %tr.comp_row %td.mod CoolioThreadSpawn %td.devfd Yes %td.app_pool Yes %td.lock Dumb %td.async thread-safe Ruby %tr.comp_row %td.mod FiberSpawn %td.devfd Yes %td.app_pool Yes %td.lock No! %td.async Rainbows::Fiber::IO, Rainbows.sleep %tr.comp_row %td.mod FiberPool %td.devfd Yes %td.app_pool Yes %td.lock No! %td.async Rainbows::Fiber::IO, Rainbows.sleep %tr.comp_row %td.mod ActorSpawn %td.devfd no-op %td.app_pool Yes %td.lock Yes %td.async thread-safe Ruby %tr.comp_row %td.mod NeverBlock %td.devfd Yes %td.app_pool Yes* %td.lock Yes* %td.async NeverBlock, async_sinatra %tr.comp_row %td.mod CoolioThreadPool %td.devfd Yes %td.app_pool Yes %td.lock Dumb %td.async Coolio, thread-safe Ruby %tr.comp_row %td.mod CoolioFiberSpawn %td.devfd Yes %td.app_pool Yes %td.lock No! %td.async Coolio, Rainbows::Fiber::IO, Rainbows.sleep %tr.comp_row %td.mod WriterThreadPool %td.devfd Yes %td.app_pool no-op %td.lock no-op %td.async thread-safe Ruby in response body only %tr.comp_row %td.mod WriterThreadSpawn %td.devfd Yes %td.app_pool no-op %td.lock no-op %td.async thread-safe Ruby in response body only %tr.comp_row %td.mod Epoll %td.devfd Yes %td.app_pool no-op %td.lock no-op %td.async DevFdResponse %tr.comp_row %td.mod XEpoll %td.devfd Yes %td.app_pool no-op %td.lock no-op %td.async DevFdResponse %tr.comp_row %td.mod XEpollThreadPool %td.devfd Yes %td.app_pool Yes %td.lock Yes %td.async thread-safe Ruby %tr.comp_row %td.mod XEpollThreadSpawn %td.devfd Yes %td.app_pool Yes %td.lock Yes %td.async thread-safe Ruby %ul %li "No!" means it's fundamentally incompatible, use an %a(href="Rainbows/AppPool.html") AppPool %b :size of one instead. %li NeverBlock also supports a :pool_size option which is one less layer of complexity than using AppPool. %li NeverBlock can neuter the Mutex class so Rack::Lock effectively becomes a no-op with: %br %code require "never_block/frameworks/rails" (before Rails is loaded) %li Everything that's DevFdResponse-compatible can use it for passing async responses through rainbows-5.0.0/SIGNALS0000644000004100000410000001132112641135250014410 0ustar www-datawww-data== Signal handling In general, signals need only be sent to the master process. However, the signals Rainbows! uses internally to communicate with the worker processes are documented here as well. With the exception of TTIN/TTOU, signal handling matches the behavior of and {nginx}[http://nginx.net/] so it should be possible to easily share process management scripts between \Rainbows!, unicorn and nginx. 
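Any of the signals below can be delivered with plain kill(1).  As a rough
illustration only (the pid file path is hypothetical and comes from the
"pid" configuration directive), a Ruby deployment script could do the
equivalent of "kill -HUP" like this:

  master_pid = Integer(File.read("/path/to/rainbows.pid").strip)
  Process.kill(:HUP, master_pid) # reload config file, app and workers
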
=== Master Process * HUP - reload config file, app, and gracefully restart all workers If the "preload_app" directive is false (the default), then workers will also pick up any application code changes when restarted. If "preload_app" is true, then application code changes will have no effect; USR2 + QUIT (see below) must be used to load newer code in this case. When reloading the application, +Gem.refresh+ will be called so updated code for your application can pick up newly installed RubyGems. It is not recommended that you uninstall libraries your application depends on while Rainbows! is running, as respawned workers may enter a spawn loop when they fail to load an uninstalled dependency. * INT/TERM - quick shutdown, kills all workers immediately * QUIT - graceful shutdown, waits for workers to finish their current request before finishing. This currently does not wait for requests deferred to a separate thread when using EventMachine (when app.deferred?(env) => true) * USR1 - reopen all logs owned by the master and all workers See Unicorn::Util.reopen_logs for what is considered a log. * USR2 - reexecute the running binary. A separate QUIT should be sent to the original process once the child is verified to be up and running. * WINCH - gracefully stops workers but keep the master running. This will only work for daemonized processes. * TTIN - increment the number of worker processes by one * TTOU - decrement the number of worker processes by one === Worker Processes Sending signals directly to the worker processes should not normally be needed. If the master process is running, any exited worker will be automatically respawned. * INT/TERM - Quick shutdown, immediately exit. Unless WINCH has been sent to the master (or the master is killed), the master process will respawn a worker to replace this one. * QUIT - Gracefully exit after finishing the current request. Unless WINCH has been sent to the master (or the master is killed), the master process will respawn a worker to replace this one. This currently does not wait for requests deferred to a separate thread when using EventMachine (when app.deferred?(env) => true) * USR1 - Reopen all logs owned by the worker process. See Unicorn::Util.reopen_logs for what is considered a log. Unlike unicorn, log files are reopened immediately in \Rainbows! since worker processes are likely to be serving multiple clients simutaneously, we can't wait for all of them to finish. === Procedure to replace a running rainbows executable You may replace a running instance of rainbows with a new one without losing any incoming connections. Doing so will reload all of your application code, unicorn/Rainbows! config, Ruby executable, and all libraries. The only things that will not change (due to OS limitations) are: 1. The path to the rainbows executable script. If you want to change to a different installation of Ruby, you can modify the shebang line to point to your alternative interpreter. The procedure is exactly like that of nginx: 1. Send USR2 to the master process 2. Check your process manager or pid files to see if a new master spawned successfully. If you're using a pid file, the old process will have ".oldbin" appended to its path. You should have two master instances of rainbows running now, both of which will have workers servicing requests. 
Your process tree should look something like this: rainbows master (old) \_ rainbows worker[0] \_ rainbows worker[1] \_ rainbows worker[2] \_ rainbows worker[3] \_ rainbows master \_ rainbows worker[0] \_ rainbows worker[1] \_ rainbows worker[2] \_ rainbows worker[3] 3. You can now send WINCH to the old master process so only the new workers serve requests. If your rainbows process is bound to an interactive terminal, you can skip this step. Step 5 will be more difficult but you can also skip it if your process is not daemonized. 4. You should now ensure that everything is running correctly with the new workers as the old workers die off. 5. If everything seems ok, then send QUIT to the old master. You're done! If something is broken, then send HUP to the old master to reload the config and restart its workers. Then send QUIT to the new master process. rainbows-5.0.0/GIT-VERSION-GEN0000755000004100000410000000205612641135250015475 0ustar www-datawww-data#!/usr/bin/env ruby DEF_VER = "v5.0.0" CONSTANT = "Rainbows::Const::RAINBOWS_VERSION" RVF = "lib/rainbows/version.rb" GVF = "GIT-VERSION-FILE" vn = DEF_VER # First see if there is a version file (included in release tarballs), # then try git-describe, then default. if File.exist?(".git") describe = `git describe --abbrev=4 HEAD 2>/dev/null`.strip case describe when /\Av[0-9]*/ vn = describe system(*%w(git update-index -q --refresh)) unless `git diff-index --name-only HEAD --`.chomp.empty? vn << "-dirty" end vn.tr!('-', '.') end end vn = vn.sub!(/\Av/, "") # generate the Ruby constant new_ruby_version = "#{CONSTANT} = '#{vn}'\n" cur_ruby_version = File.read(RVF) rescue nil if new_ruby_version != cur_ruby_version File.open(RVF, "w") { |fp| fp.write(new_ruby_version) } end # generate the makefile snippet new_make_version = "GIT_VERSION = #{vn}\n" cur_make_version = File.read(GVF) rescue nil if new_make_version != cur_make_version File.open(GVF, "w") { |fp| fp.write(new_make_version) } end puts vn if $0 == __FILE__ rainbows-5.0.0/vs_Unicorn0000644000004100000410000000573112641135250015445 0ustar www-datawww-data= \Rainbows! is like unicorn, but Different... While \Rainbows! depends on unicorn for its process/socket management, HTTP parser and configuration language; \Rainbows! is more ambitious. == Architectural Diagrams === unicorn uses a 1:1 mapping of processes to clients unicorn master \_ unicorn worker[0] | \_ client[0] \_ unicorn worker[1] | \_ client[1] \_ unicorn worker[2] | \_ client[2] ... \_ unicorn worker[M] \_ client[M] === \Rainbows! uses a M:N mapping of processes to clients rainbows master \_ rainbows worker[0] | \_ client[0,0] | \_ client[0,1] | \_ client[0,2] | ... | \_ client[0,N] \_ rainbows worker[1] | \_ client[1,0] | \_ client[1,1] | \_ client[1,2] | \_ client[1,3] | ... | \_ client[1,N] \_ rainbows worker[2] | \_ client[2,0] | \_ client[2,1] | \_ client[2,2] | ... | \_ client[2,N] ... \_ rainbows worker[M] \_ client[M,0] \_ client[M,1] \_ client[M,2] ... \_ client[M,N] In both cases, workers share common listen sockets with the master and pull connections off the listen queue only if the worker has resources available. == Differences from unicorn * log rotation is handled immediately in \Rainbows! whereas unicorn has the luxury of delaying it until the current request is finished processing to prevent log entries for one request to be split across files. 
* load balancing between workers is imperfect, certain worker processes may be servicing more requests than others so it is important to not set +worker_connections+ too high. unicorn worker processes can never be servicing more than one request at once. * speculative, non-blocking accept() is not used, this is to help load balance between multiple worker processes. * HTTP pipelining and keepalive may be used for GET and HEAD requests. * Less heavily-tested and inherently more complex. == Similarities with unicorn While some similarities are obvious (we depend on and subclass off unicorn code), some things are not: * Does not attempt to accept() connections when pre-configured limits are hit (+worker_connections+). This will first help balance load to different worker processes, and if your listen() +:backlog+ is overflowing: to other machines in your cluster. * Accepts the same {signals}[http://unicorn.bogomips.org/SIGNALS.html] for process management, so you can share scripts to manage them (and nginx, too). * supports per-process listeners, allowing an external load balancer like haproxy or nginx to be used to balance between multiple worker processes. * Exposes a streaming "rack.input" to the Rack application that reads data off the socket as the application reads it (while retaining rewindable semantics as required by Rack). This allows Rack-compliant apps/middleware to implement things such as real-time upload progress monitoring. rainbows-5.0.0/.document0000644000004100000410000000203112641135250015202 0ustar www-datawww-dataDEPLOY FAQ lib/rainbows.rb lib/rainbows/actor_spawn.rb lib/rainbows/app_pool.rb lib/rainbows/base.rb lib/rainbows/configurator.rb lib/rainbows/coolio.rb lib/rainbows/coolio_thread_pool.rb lib/rainbows/coolio_thread_spawn.rb lib/rainbows/dev_fd_response.rb lib/rainbows/epoll.rb lib/rainbows/event_machine.rb lib/rainbows/fiber_pool.rb lib/rainbows/fiber_spawn.rb lib/rainbows/max_body.rb lib/rainbows/never_block.rb lib/rainbows/rev.rb lib/rainbows/rev_fiber_spawn.rb lib/rainbows/rev_thread_pool.rb lib/rainbows/rev_thread_spawn.rb lib/rainbows/revactor.rb lib/rainbows/sendfile.rb lib/rainbows/server_token.rb lib/rainbows/stream_response_epoll.rb lib/rainbows/thread_pool.rb lib/rainbows/thread_spawn.rb lib/rainbows/thread_timeout.rb lib/rainbows/worker_yield.rb lib/rainbows/writer_thread_pool.rb lib/rainbows/writer_thread_spawn.rb lib/rainbows/xepoll.rb lib/rainbows/xepoll_thread_pool.rb lib/rainbows/xepoll_thread_spawn.rb LATEST LICENSE NEWS rainbows_1 README SIGNALS TODO TUNING vs_Unicorn Summary Test_Suite Static_Files Sandbox HACKING rainbows-5.0.0/Sandbox0000644000004100000410000000130512641135250014707 0ustar www-datawww-data= Tips for using \Rainbows! with Sandbox installation tools Most {tips for unicorn}[http://unicorn.bogomips.org/Sandbox.html] for Bundler and Isolate apply to \Rainbows! as well. == TLDR (Bundler) You need to add "rainbows" to your Gemfile for Bundler and start \Rainbows! with: bundle exec rainbows ... == TLDR (Isolate) Isolate "rainbows" and execute the "rainbows" launcher in your isolated GEM_PATH: $APP_ROOT/tmp/ruby-1.9/bin/rainbows ... == Explanation Due to the variety of potential dependencies, \Rainbows! lazy loads many of its internals, often after the application itself is loaded. This results in more potential to interact badly with sandbox tools that modify the gem environment. 
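As a minimal illustration of the Bundler setup described in the TLDR above
(gem versions are deliberately omitted and the source line is simply the
usual public gem server), the Gemfile only needs to declare the server
itself alongside your application's other dependencies:

  # Gemfile
  source "https://rubygems.org"
  gem "rainbows"

\Rainbows! is then always started through "bundle exec rainbows ..." so
that the lazily-loaded internals mentioned above resolve against the
sandboxed gem environment.
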
rainbows-5.0.0/COPYING0000644000004100000410000010436712641135250014435 0ustar www-datawww-data GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. 
The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. 
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. 
Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". 
A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. 
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: <program> Copyright (C) <year> <name of author> This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <http://www.gnu.org/licenses/>. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>.