minimization-0.2.5/ 0000755 0000041 0000041 00000000000 13413717302 014256 5 ustar www-data www-data minimization-0.2.5/Gemfile.lock 0000644 0000041 0000041 00000001250 13413717302 016476 0 ustar www-data www-data PATH
remote: .
specs:
minimization (0.2.5)
text-table (~> 1.2)
GEM
remote: https://rubygems.org/
specs:
diff-lcs (1.3)
rake (10.5.0)
rspec (3.8.0)
rspec-core (~> 3.8.0)
rspec-expectations (~> 3.8.0)
rspec-mocks (~> 3.8.0)
rspec-core (3.8.0)
rspec-support (~> 3.8.0)
rspec-expectations (3.8.2)
diff-lcs (>= 1.2.0, < 2.0)
rspec-support (~> 3.8.0)
rspec-mocks (3.8.0)
diff-lcs (>= 1.2.0, < 2.0)
rspec-support (~> 3.8.0)
rspec-support (3.8.0)
text-table (1.2.4)
PLATFORMS
ruby
DEPENDENCIES
bundler (~> 1.3)
minimization!
rake (~> 10)
rspec (~> 3.2)
BUNDLED WITH
1.17.2
minimization-0.2.5/.travis.yml 0000644 0000041 0000041 00000000301 13413717302 016361 0 ustar www-data www-data language: ruby
rvm:
- 2.0
- 2.1
- 2.2
- 2.5
- 2.6
script: bundle install && bundle exec rake spec
before_install:
- sudo apt-get update -qq
- sudo apt-get install -y libgsl0-dev
minimization-0.2.5/README.md 0000644 0000041 0000041 00000001336 13413717302 015540 0 ustar www-data www-data = minimization
* http://github.com/clbustos/minimization
== DESCRIPTION:
Minimization algorithms on pure Ruby.
== FEATURES/PROBLEMS:
Unidimensional:
* Newton-Raphson (requires first and second derivative)
* Golden Section
* Brent (Port of GSL code)
Multidimensional:
* Fletcher-Reeves (requires first derivative)
* Polak-Ribiere (requires first derivative)
* Nelder-Mead
* Powell's method
If you need speed, use the *gsl* gem
== SYNOPSIS:
d=Minimization::Brent.new(-1000,20000 , proc {|x| x**2})
d.iterate
puts d.x_minimum
puts d.f_minimum
== REQUIREMENTS:
* Pure Ruby
== INSTALL:
sudo gem install minimization
== API:
http://ruby-statsample.rubyforge.org/minimization/
== LICENSE:
BSD 2-clause (See LICENSE.txt)
minimization-0.2.5/spec/ 0000755 0000041 0000041 00000000000 13413717302 015210 5 ustar www-data www-data minimization-0.2.5/spec/minimization_unidimensional_spec.rb 0000644 0000041 0000041 00000003411 13413717302 024353 0 ustar www-data www-data require File.expand_path(File.dirname(__FILE__) + '/spec_helper')
# Specs for the unidimensional minimizers (NewtonRaphson, GoldenSection, Brent).
# Each algorithm minimizes the random parabola f(x) = (x - @p1)**2 + @p2,
# whose minimum lies at x = @p1 with f(x) = @p2.
describe Minimization::Unidimensional, "subclass" do
before(:all) do
# Random vertex coordinates for the test parabola.
@p1=rand(100)
@p2=rand(100)
# Function, first derivative and second derivative
# (Newton-Raphson needs all three).
@func=lambda {|x| (x-@p1)**2+@p2}
@funcd=lambda {|x| 2*(x-@p1)}
@funcdd=lambda {|x| 2}
end
describe Minimization::NewtonRaphson do
before do
# NewtonRaphson has no block-based convenience constructor
# (.minimize is disabled), so build and iterate it directly.
@min = Minimization::NewtonRaphson.new(-1000,1000, @func,@funcd, @funcdd)
@min.iterate
end
it "#x_minimum be close to expected" do
@min.x_minimum.should be_within(@min.epsilon).of(@p1)
end
it "#f_minimum ( f(x)) be close to expected" do
@min.f_minimum.should be_within(@min.epsilon).of(@p2)
end
# The iteration log should be an array convertible to a text table
# (via the text-table gem).
context "#log" do
subject {@min.log}
it {should be_instance_of Array}
it {should respond_to :to_table}
end
end
describe Minimization::GoldenSection do
before do
@min = Minimization::GoldenSection.minimize(-1000,1000, &@func)
end
it "#x_minimum be close to expected" do
@min.x_minimum.should be_within(@min.epsilon).of(@p1)
end
it "#f_minimum ( f(x)) be close to expected" do
@min.f_minimum.should be_within(@min.epsilon).of(@p2)
end
context "#log" do
subject {@min.log}
it {should be_instance_of Array}
it {should respond_to :to_table}
end
end
describe Minimization::Brent do
before do
@min = Minimization::Brent.minimize(-1000,1000, &@func)
end
it "should x be correct" do
@min.x_minimum.should be_within(@min.epsilon).of(@p1)
end
it "should f(x) be correct" do
@min.f_minimum.should be_within(@min.epsilon).of(@p2)
end
context "#log" do
subject {@min.log}
it {should be_instance_of Array}
it {should respond_to :to_table}
end
end
end
minimization-0.2.5/spec/minimization_conjugate_gradient_fletcher_reeves_spec.rb 0000644 0000041 0000041 00000004210 13413717302 030414 0 ustar www-data www-data require "#{File.expand_path(File.dirname(__FILE__))}/../lib/multidim/conjugate_gradient.rb"
# Specs for the Fletcher-Reeves conjugate-gradient minimizer.
describe Minimization::FletcherReeves do
before :all do
@n = 3
@limit = 100
@epsilon = 1e-6
@p = Array.new(@n)
@start_point = Array.new(@n)
# Random target point @p and random starting point, both in [0, @limit).
0.upto(@n - 1) do |i|
@p[i] = rand(@limit)
end
0.upto(@n - 1) do |i|
@start_point[i] = rand(@limit)
end
# fletcher_reeves example 1: separable quadratic, minimum at @p with f = 0.
# (Removed a leftover debug `puts @p.inspect` that polluted the test output.)
f = proc{ |x| (x[0] - @p[0])**2 + (x[1] - @p[1])**2 + (x[2] - @p[2])**2 }
fd = proc{ |x| [ 2 * (x[0] - @p[0]) , 2 * (x[1] - @p[1]) , 2 * (x[2] - @p[2]) ] }
@min1 = Minimization::FletcherReeves.minimize(f, fd, @start_point)
# fletcher_reeves example 2: squared linear form; minimum value is @k,
# attained on a whole plane (so only f_minimum is checked).
@k = rand(@limit)
f2 = proc{ |x| ( @p[0]*x[0] + @p[1]*x[1] + @p[2]*x[2] )**2 + @k}
fd2 = proc{ |x|
r0 = ( @p[0]*x[0] + @p[1]*x[1] + @p[2]*x[2] ) * 2 * @p[0]
r1 = ( @p[0]*x[0] + @p[1]*x[1] + @p[2]*x[2] ) * 2 * @p[1]
r2 = ( @p[0]*x[0] + @p[1]*x[1] + @p[2]*x[2] ) * 2 * @p[2]
[r0, r1, r2]
}
@min2 = Minimization::FletcherReeves.minimize(f2, fd2, @start_point)
# fletcher_reeves example 3 : unidimensional quadratic, minimum at @p[0], f = @k.
f3 = proc{ |x| ( (x[0] - @p[0])**2 + @k ) }
fd3 = proc{ |x| [ (x[0] - @p[0]) * 2 ] }
starting_point_3 = [rand(@limit)]
@min3 = Minimization::FletcherReeves.minimize(f3, fd3, starting_point_3)
end
it "#x_minimum be close to expected in example 1 :fletcher_reeves" do
0.upto(@n - 1) do |i|
expect(@min1.x_minimum[i]).to be_within(@epsilon).of(@p[i])
end
end
it "#f_minimum be close to expected in example 1 :fletcher_reeves" do
expect(@min1.f_minimum).to be_within(@epsilon).of(0)
end
it "#f_minimum be close to expected in example 2 :fletcher_reeves" do
expect(@min2.f_minimum).to be_within(@epsilon).of(@k)
end
it "#x_minimum be close to expected in example 3 :fletcher_reeves" do
expect(@min3.x_minimum[0]).to be_within(@epsilon).of(@p[0])
end
it "#f_minimum be close to expected in example 3 :fletcher_reeves" do
expect(@min3.f_minimum).to be_within(@epsilon).of(@k)
end
end
minimization-0.2.5/spec/spec.opts 0000644 0000041 0000041 00000000014 13413717302 017044 0 ustar www-data www-data --color
-f s minimization-0.2.5/spec/minimization_conjugate_gradient_polak_ribiere_spec.rb 0000644 0000041 0000041 00000003001 13413717302 030053 0 ustar www-data www-data require "#{File.expand_path(File.dirname(__FILE__))}/../lib/multidim/conjugate_gradient.rb"
# Specs for the Polak-Ribiere conjugate-gradient minimizer.
describe Minimization::PolakRibiere do
before :all do
@n = 3
@limit = 100
@epsilon = 1e-6
@p = Array.new(@n)
@start_point = Array.new(@n)
# Random target point @p and random starting point, both in [0, @limit).
0.upto(@n - 1) do |i|
@p[i] = rand(@limit)
end
0.upto(@n - 1) do |i|
@start_point[i] = rand(@limit)
end
# example 1: separable quadratic, minimum at @p with f = 0
f = proc{ |x| (x[0] - @p[0])**2 + (x[1] - @p[1])**2 + (x[2] - @p[2])**2 }
fd = proc{ |x| [ 2 * (x[0] - @p[0]) , 2 * (x[1] - @p[1]) , 2 * (x[2] - @p[2]) ] }
@min1 = Minimization::PolakRibiere.minimize(f, fd, @start_point)
# example 2 : unidimensional quadratic, minimum at @p[0] with f = @k
@k = rand(@limit)
f2 = proc{ |x| ( (x[0] - @p[0])**2 + @k ) }
fd2 = proc{ |x| [ (x[0] - @p[0]) * 2 ] }
starting_point_2 = [rand(@limit)]
@min2 = Minimization::PolakRibiere.minimize(f2, fd2, starting_point_2)
end
it "#x_minimum be close to expected in example 1 :polak_ribiere" do
0.upto(@n - 1) do |i|
expect(@min1.x_minimum[i]).to be_within(@epsilon).of(@p[i])
end
end
it "#f_minimum be close to expected in example 1 :polak_ribiere" do
expect(@min1.f_minimum).to be_within(@epsilon).of(0)
end
it "#x_minimum be close to expected in example 2 :polak_ribiere" do
expect(@min2.x_minimum[0]).to be_within(@epsilon).of(@p[0])
end
it "#f_minimum be close to expected in example 2 :polak_ribiere" do
expect(@min2.f_minimum).to be_within(@epsilon).of(@k)
end
end
minimization-0.2.5/spec/spec_helper.rb 0000644 0000041 0000041 00000000507 13413717302 020030 0 ustar www-data www-data $LOAD_PATH.unshift(File.dirname(__FILE__))
$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
require 'minimization.rb'
require 'rspec'
# Enable both expectation syntaxes: the unidimensional specs still use the
# legacy `should` form, while the multidimensional specs use `expect`.
RSpec.configure do |config|
config.expect_with :rspec do |c|
c.syntax = [:should, :expect]
end
end
# Spec helper: monkey-patches String with a small fixture utility.
class String
  # Returns a copy of the string with leading spaces/tabs stripped from
  # every line, so heredoc fixtures can be written indented in specs.
  def deindent
    # Parenthesized arguments avoid Ruby's "ambiguous first argument"
    # warning that the bare `gsub /re/, ''` form triggers.
    gsub(/^[ \t]*/, '')
  end
end
minimization-0.2.5/spec/minimization_powell_spec_spec.rb 0000644 0000041 0000041 00000003231 13413717302 023651 0 ustar www-data www-data require "#{File.expand_path(File.dirname(__FILE__))}/../lib/multidim/powell.rb"
# Specs for Powell's direction-set minimizer (derivative-free, bounded).
describe Minimization::Powell do
before :all do
@n = 3
@limit = 100
@epsilon = 1e-5
@p = Array.new(@n)
@start_point = Array.new(@n)
# Random target point @p and random starting point, both in [0, @limit).
0.upto(@n - 1) do |i|
@p[i] = rand(@limit)
end
0.upto(@n - 1) do |i|
@start_point[i] = rand(@limit)
end
# example 1: separable quadratic, minimum at @p with f = 0
f = proc{ |x| (x[0] - @p[0])**2 + (x[1] - @p[1])**2 + (x[2] - @p[2])**2 }
@min1 = Minimization::Powell.minimize(f, @start_point, [-@limit, -@limit, -@limit], [@limit, @limit, @limit])
# example 2: squared linear form; minimum value @k on a whole plane,
# so only f_minimum is checked
@k = rand(@limit)
f2 = proc{ |x| ( @p[0]*x[0] + @p[1]*x[1] + @p[2]*x[2] )**2 + @k}
@min2 = Minimization::Powell.minimize(f2, @start_point, [-@limit, -@limit, -@limit], [@limit, @limit, @limit])
# example 3 : effectively unidimensional (only x[0] matters), minimum at @p[0]
f3 = proc{ |x| (x[0] - @p[0])**2 + @k}
@min3 = Minimization::Powell.minimize(f3, @start_point, [-@limit, -@limit, -@limit], [@limit, @limit, @limit])
end
it "#x_minimum be close to expected in example 1" do
0.upto(@n - 1) do |i|
expect(@min1.x_minimum[i]).to be_within(@epsilon).of(@p[i])
end
end
it "#f_minimum be close to expected in example 1" do
expect(@min1.f_minimum).to be_within(@epsilon).of(0)
end
it "#f_minimum be close to expected in example 2" do
expect(@min2.f_minimum).to be_within(@epsilon).of(@k)
end
it "#x_minimum be close to expected in example 3" do
expect(@min3.x_minimum[0]).to be_within(@epsilon).of(@p[0])
end
it "#f_minimum be close to expected in example 3" do
expect(@min3.f_minimum).to be_within(@epsilon).of(@k)
end
end
minimization-0.2.5/spec/minimization_nelder_mead_spec.rb 0000644 0000041 0000041 00000003001 13413717302 023567 0 ustar www-data www-data require "#{File.expand_path(File.dirname(__FILE__))}/../lib/multidim/nelder_mead.rb"
# Specs for the Nelder-Mead simplex minimizer (derivative-free).
describe Minimization::NelderMead do
before :all do
@n = 3
@limit = 100
@epsilon = 1e-6
@p = Array.new(@n)
@start_point = Array.new(@n)
# Random target point @p and random starting point, both in [0, @limit).
0.upto(@n - 1) do |i|
@p[i] = rand(@limit)
end
0.upto(@n - 1) do |i|
@start_point[i] = rand(@limit)
end
# example 1: separable quadratic, minimum at @p with f = 0
f = proc{ |x| (x[0] - @p[0])**2 + (x[1] - @p[1])**2 + (x[2] - @p[2])**2 }
@min1 = Minimization::NelderMead.minimize(f, @start_point)
# example 2: squared linear form; minimum value @k on a whole plane,
# so only f_minimum is checked
@k = rand(@limit)
f2 = proc{ |x| ( @p[0]*x[0] + @p[1]*x[1] + @p[2]*x[2] )**2 + @k}
@min2 = Minimization::NelderMead.minimize(f2, @start_point)
# example 3 : unidimensional quadratic, minimum at @p[0] with f = @k.
# NOTE(review): the starting point reuses @k, which here is just an
# arbitrary value in [0, @limit) — presumably intentional, but any
# random scalar would do.
f3 = proc{ |x| (x[0] - @p[0])**2 + @k}
@min3 = Minimization::NelderMead.minimize(f3, [@k])
end
it "#x_minimum be close to expected in example 1" do
0.upto(@n - 1) do |i|
expect(@min1.x_minimum[i]).to be_within(@epsilon).of(@p[i])
end
end
it "#f_minimum be close to expected in example 1" do
expect(@min1.f_minimum).to be_within(@epsilon).of(0)
end
it "#f_minimum be close to expected in example 2" do
expect(@min2.f_minimum).to be_within(@epsilon).of(@k)
end
it "#x_minimum be close to expected in example 3" do
expect(@min3.x_minimum[0]).to be_within(@epsilon).of(@p[0])
end
it "#f_minimum be close to expected in example 3" do
expect(@min3.f_minimum).to be_within(@epsilon).of(@k)
end
end
minimization-0.2.5/.gitignore 0000644 0000041 0000041 00000000030 13413717302 016237 0 ustar www-data www-data Gemfile.lock
doc
pkg
*~
minimization-0.2.5/History.txt 0000644 0000041 0000041 00000001177 13413717302 016466 0 ustar www-data www-data === 0.2.5 / 2018-12-29
* Fixed gem in RubyGems
* Updated travis test
=== 0.2.4 / 2015-07-20
* Merge from clbustos repository
=== 0.2.3 / 2015-04-27
* Removed rb-gsl dependency.
=== 0.2.2 / 2015-04-03
* Added Travis-CI support.
* Removed Hoe in favor of using a gemspec.
=== 0.2.1 / 2010-11-14
* Added iterations method.
=== 0.2.0 / 2010-04-15
* New Minimization::NewtonRaphson class, which implements a naive Newton-Raphson minimization method
`x = x_n - (f'(x) / f''(x))`.
=== 0.1.1 / 2010-03-19
* New Minimization#minimize convenience method.
=== 0.1.0 / 2010-02-24
* Golden Section and Brent Algorithm.
minimization-0.2.5/Rakefile 0000644 0000041 0000041 00000001134 13413717302 015722 0 ustar www-data www-data require 'rake'
require 'bundler'
require 'bundler/gem_tasks'
require "rspec/core/rake_task"
require 'rdoc/task'
# Setup the necessary gems, specified in the gemspec; abort with a helpful
# message when the bundle is not installed.
begin
Bundler.setup(:default, :development)
rescue Bundler::BundlerError => e
$stderr.puts e.message
$stderr.puts "Run `bundle install` to install missing gems"
exit e.status_code
end
# Run the RSpec suite over spec/**/*_spec.rb (also the default task).
RSpec::Core::RakeTask.new(:spec) do |spec|
spec.pattern = FileList['spec/**/*_spec.rb']
end
desc "Open an irb session preloaded with distribution"
task :console do
sh "irb -rubygems -I lib -r minimization.rb"
end
task :default => [:spec]
minimization-0.2.5/lib/ 0000755 0000041 0000041 00000000000 13413717302 015024 5 ustar www-data www-data minimization-0.2.5/lib/minimization.rb 0000644 0000041 0000041 00000030540 13413717302 020062 0 ustar www-data www-data # = minimization.rb -
# Minimization- Minimization algorithms on pure Ruby
# Copyright (C) 2010-2018 Claudio Bustos
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Algorithms for unidimensional minimization
# importing the multidimensional minimization classes
require_relative 'multidim/brent_root_finder'
require_relative 'multidim/conjugate_gradient'
require_relative 'multidim/nelder_mead'
require_relative 'multidim/point_value_pair'
require_relative 'multidim/powell'
require 'text-table'
module Minimization
FailedIteration=Class.new(Exception)
# Base class for unidimensional minimizers.
# Holds the search interval, tolerance, iteration log and result
# accessors shared by NewtonRaphson, GoldenSection, Brent and Bisection.
class Unidimensional
  # Default value for error on f(x)
  EPSILON = 1e-6
  # Default number of maximum iterations
  MAX_ITERATIONS = 100
  # Minimum value for x
  attr_reader :x_minimum
  # Minimum value for f(x)
  attr_reader :f_minimum
  # Log of iterations. Should be an array
  attr_reader :log
  # Name of fields of log
  attr_reader :log_header
  # Absolute error on x
  attr_accessor :epsilon
  # Expected value. Fast minimum finding if set
  attr_reader :expected
  # Numbers of iterations
  attr_reader :iterations

  # Create a new minimizer over [lower, upper] for the callable +proc+.
  # Raises when the bounds are not strictly ordered.
  def initialize(lower, upper, proc)
    raise "first argument should be lower than second" if lower >= upper
    @lower = lower
    @upper = upper
    @proc = proc
    # Seed the expected minimum at the golden-section point of the interval.
    golden_fraction = 0.3819660
    @expected = @lower + golden_fraction * (@upper - @lower)
    @max_iteration = MAX_ITERATIONS
    @epsilon = EPSILON
    @iterations = 0
    @log = []
    @log_header = %w{I xl xh f(xl) f(xh) dx df(x)}
  end

  # Set expected value
  attr_writer :expected

  # All logged iterations joined into one string, one row per line.
  def log_summary
    @log.join("\n")
  end

  # Convenience method to minimize
  # == Parameters:
  # * lower: Lower possible value
  # * upper: Higher possible value
  # * expected: Optional expected value. Faster the search is near correct value.
  # * &block: Block with function to minimize
  # == Usage:
  #   minimizer = Minimization::GoldenSection.minimize(-1000, 1000) { |x| x**2 }
  def self.minimize(lower, upper, expected = nil, &block)
    minimizer = new(lower, upper, block)
    minimizer.expected = expected unless expected.nil?
    raise FailedIteration unless minimizer.iterate
    minimizer
  end

  # Iterate to find the minimum; subclasses must override this.
  def iterate
    raise "You should implement this"
  end

  # Evaluate the target function at x.
  def f(x)
    @proc.call(x)
  end
end
# Classic Newton-Raphson minimization method.
# Requires first and second derivative
# == Usage
#   f = lambda {|x| x**2}
#   fd = lambda {|x| 2*x}
#   fdd = lambda {|x| 2}
#   min = Minimization::NewtonRaphson.new(-1000, 1000, f, fd, fdd)
#   min.iterate
#   min.x_minimum
#   min.f_minimum
class NewtonRaphson < Unidimensional
  # == Parameters:
  # * lower: Lower possible value
  # * upper: Higher possible value
  # * proc: Original function
  # * proc_1d: First derivative
  # * proc_2d: Second derivative
  def initialize(lower, upper, proc, proc_1d, proc_2d)
    super(lower, upper, proc)
    @proc_1d = proc_1d
    @proc_2d = proc_2d
  end

  # Not supported: Newton-Raphson needs both derivatives, which the
  # generic block-based convenience constructor cannot accept.
  def self.minimize(*args)
    raise "You should use #new and #iterate"
  end

  # Iterates x_{n+1} = x_n - f'(x_n)/f''(x_n), starting from @expected,
  # until |x - x_prev| <= epsilon or the iteration cap is reached.
  # Raises FailedIteration when it fails to converge. Returns true.
  def iterate
    x_prev = @lower
    x = @expected
    k = 0
    while (x - x_prev).abs > @epsilon and k < @max_iteration
      k += 1
      x_prev = x
      x = x - (@proc_1d.call(x).quo(@proc_2d.call(x)))
      # Locals renamed fx/fx_prev: the original `f = f(x)` shadowed the
      # method f with a local variable.
      fx_prev = f(x_prev)
      fx = f(x)
      x_min, x_max = [x, x_prev].min, [x, x_prev].max
      f_min, f_max = [fx, fx_prev].min, [fx, fx_prev].max
      @log << [k, x_min, x_max, f_min, f_max, (x_prev - x).abs, (fx - fx_prev).abs]
    end
    # Fail only when we actually ran out of iterations without converging
    # (the original raised even when convergence landed exactly on the cap).
    raise FailedIteration, "Not converged" if (x - x_prev).abs > @epsilon
    @iterations = k
    @x_minimum = x
    @f_minimum = f(x)
    true
  end
end
# = Golden Section Minimizer.
# Basic minimization algorithm. Slow, but robust: shrinks the bracketing
# interval by a constant golden-ratio factor each iteration.
# See Unidimensional for methods.
# == Usage.
#   require 'minimization'
#   min=Minimization::GoldenSection.new(-1000,20000, proc {|x| (x+1)**2})
#   min.expected=1.5 # Expected value
#   min.iterate
#   min.x_minimum
#   min.f_minimum
#   min.log
class GoldenSection < Unidimensional
# Start the iteration. Returns true; results in #x_minimum / #f_minimum.
def iterate
ax=@lower
bx=@expected
cx=@upper
# c = (3 - sqrt(5))/2 = 0.381966..., the golden-section fraction; r = 1 - c.
c = (3-Math::sqrt(5)).quo(2);
r = 1-c;
x0 = ax;
x3 = cx;
# Place the two interior probe points x1 < x2 on the wider side of bx.
if ((cx-bx).abs > (bx-ax).abs)
x1 = bx;
x2 = bx + c*(cx-bx);
else
x2 = bx;
x1 = bx - c*(bx-ax);
end
f1 = f(x1);
f2 = f(x2);
k = 1;
# Shrink [x0, x3] toward the probe with the smaller function value until
# the interval is narrower than epsilon (or the iteration cap is hit).
while (x3-x0).abs > @epsilon and k<@max_iteration
if f2 < f1
x0 = x1;
x1 = x2;
x2 = r*x1 + c*x3; # x2 = x1+c*(x3-x1)
f1 = f2;
f2 = f(x2);
else
x3 = x2;
x2 = x1;
x1 = r*x2 + c*x0; # x1 = x2+c*(x0-x2)
f2 = f1;
f1 = f(x1);
end
@log << [k, x3,x0, f1,f2,(x3-x0).abs, (f1-f2).abs]
k +=1;
end
# Report whichever interior probe ended up lower.
if f1 < f2
@x_minimum = x1;
@f_minimum = f1;
else
@x_minimum = x2;
@f_minimum = f2;
end
true
end
end
# Direct port of the Brent algorithm found on GSL.
# Combines parabolic interpolation with golden-section fallback steps;
# an initial bracketing phase locates a triplet enclosing the minimum
# unless an expected value is supplied.
# See Unidimensional for methods.
# == Usage
#   min = Minimization::Brent.new(-1000, 20000, proc {|x| (x+1)**2})
#   min.expected = 1.5 # Expected value
#   min.iterate
#   min.x_minimum
#   min.f_minimum
#   min.log
class Brent < Unidimensional
  GSL_SQRT_DBL_EPSILON = 1.4901161193847656e-08

  # == Parameters:
  # * lower: Lower bound of the search interval
  # * upper: Upper bound of the search interval
  # * proc: Function to minimize
  def initialize(lower, upper, proc)
    super
    @do_bracketing = true
    # Seed the current best guess at the golden-section point.
    golden = 0.3819660 # golden = (3 - sqrt(5))/2
    v = @lower + golden * (@upper - @lower)
    w = v
    @x_minimum = v
    @f_minimum = f(v)
    @x_lower = @lower
    @x_upper = @upper
    @f_lower = f(@lower)
    # BUG FIX: the upper-bound ordinate must be f(@upper); the original
    # evaluated f(@lower) twice, giving bracketing the wrong end value.
    @f_upper = f(@upper)
    @v = v
    @w = w
    @d = 0
    @e = 0
    @f_v = f(v)
    @f_w = @f_v
  end

  # Set the expected location of the minimum and skip the bracketing phase.
  def expected=(v)
    @x_minimum = v
    @f_minimum = f(v)
    @do_bracketing = false
  end

  # Locate a triplet (x_lower, x_minimum, x_upper) bracketing the minimum,
  # expanding golden-section style. Returns true when a bracket was found
  # within the evaluation budget, false otherwise (best effort stored anyway).
  def bracketing
    eval_max = 10
    f_left = @f_lower
    f_right = @f_upper
    x_left = @x_lower
    x_right = @x_upper
    golden = 0.3819660 # golden = (3 - sqrt(5))/2
    nb_eval = 0
    if f_right >= f_left
      x_center = (x_right - x_left) * golden + x_left
      nb_eval += 1
      f_center = f(x_center)
    else
      x_center = x_right
      f_center = f_right
      x_right = (x_center - x_left).quo(golden) + x_left
      nb_eval += 1
      f_right = f(x_right)
    end
    begin
      @log << ["B#{nb_eval}", x_left, x_right, f_left, f_right, (x_left - x_right).abs, (f_left - f_right).abs]
      if f_center < f_left
        if f_center < f_right
          # Bracket found: center is below both ends.
          @x_lower = x_left
          @x_upper = x_right
          @x_minimum = x_center
          @f_lower = f_left
          @f_upper = f_right
          @f_minimum = f_center
          return true
        elsif f_center > f_right
          # Minimum lies further right: slide the window.
          x_left = x_center
          f_left = f_center
          x_center = x_right
          f_center = f_right
          x_right = (x_center - x_left).quo(golden) + x_left
          nb_eval += 1
          f_right = f(x_right)
        else # f_center == f_right
          x_right = x_center
          f_right = f_center
          x_center = (x_right - x_left).quo(golden) + x_left
          nb_eval += 1
          f_center = f(x_center)
        end
      else # f_center >= f_left
        x_right = x_center
        f_right = f_center
        x_center = (x_right - x_left) * golden + x_left
        nb_eval += 1
        f_center = f(x_center)
      end
    end while (nb_eval < eval_max) and
              ((x_right - x_left) > GSL_SQRT_DBL_EPSILON * ((x_right + x_left) * 0.5) + GSL_SQRT_DBL_EPSILON)
    @x_lower = x_left
    @x_upper = x_right
    @x_minimum = x_center
    @f_lower = f_left
    @f_upper = f_right
    @f_minimum = f_center
    return false
  end

  # Start the minimization process.
  # If you want to control the process manually, use brent_iterate.
  def iterate
    k = 0
    bracketing if @do_bracketing
    while k < @max_iteration and (@x_lower - @x_upper).abs > @epsilon
      k += 1
      result = brent_iterate
      raise FailedIteration, "Error on iteration" unless result
      begin
        @log << [k, @x_lower, @x_upper, @f_lower, @f_upper, (@x_lower - @x_upper).abs, (@f_lower - @f_upper).abs]
      rescue => e
        # BUG FIX: the original used `rescue => @e`, which clobbered the
        # step-size state variable @e used by brent_iterate.
        @log << [k, e.to_s, nil, nil, nil, nil, nil]
      end
    end
    @iterations = k
    return true
  end

  # Perform one Brent step: parabolic interpolation when the fit is
  # acceptable, golden-section step otherwise. Returns true on success.
  def brent_iterate
    x_left = @x_lower
    x_right = @x_upper
    z = @x_minimum
    # NOTE(review): locals are seeded swapped (d from @e, e from @d) and
    # written back swapped below; kept exactly as in the original port —
    # confirm against GSL's brent.c before changing.
    d = @e
    e = @d
    v = @v
    w = @w
    f_v = @f_v
    f_w = @f_w
    f_z = @f_minimum
    golden = 0.3819660 # golden = (3 - sqrt(5))/2
    w_lower = (z - x_left)
    w_upper = (x_right - z)
    tolerance = GSL_SQRT_DBL_EPSILON * z.abs
    midpoint = 0.5 * (x_left + x_right)
    _p, q, r = 0, 0, 0
    if e.abs > tolerance
      # Fit a parabola through (v, f_v), (w, f_w), (z, f_z).
      r = (z - w) * (f_z - f_v)
      q = (z - v) * (f_z - f_w)
      _p = (z - v) * q - (z - w) * r
      q = 2 * (q - r)
      if q > 0
        _p = -_p
      else
        q = -q
      end
      r = e
      e = d
    end
    if _p.abs < (0.5 * q * r).abs and _p < q * w_lower and _p < q * w_upper
      # Accept the parabolic step, but stay at least `tolerance` away
      # from the interval ends.
      t2 = 2 * tolerance
      d = _p.quo(q)
      u = z + d
      if (u - x_left) < t2 or (x_right - u) < t2
        d = (z < midpoint) ? tolerance : -tolerance
      end
    else
      # Parabola rejected: take a golden-section step into the larger side.
      e = (z < midpoint) ? x_right - z : -(z - x_left)
      d = golden * e
    end
    # Never step by less than the tolerance.
    if d.abs >= tolerance
      u = z + d
    else
      u = z + ((d > 0) ? tolerance : -tolerance)
    end
    @e = e
    @d = d
    f_u = f(u)
    if f_u <= f_z
      # New best point: shrink the interval around u and promote z into
      # the (v, w) history.
      if u < z
        @x_upper = z
        @f_upper = f_z
      else
        @x_lower = z
        @f_lower = f_z
      end
      @v = w
      @f_v = f_w
      @w = z
      @f_w = f_z
      @x_minimum = u
      @f_minimum = f_u
      true
    else
      # u is worse than z: tighten the bound on u's side, then update the
      # v/w bookkeeping as GSL does. (BUG FIX: the original returned
      # before this bookkeeping, leaving the v/w history stale and
      # degrading subsequent parabolic fits.)
      if u < z
        @x_lower = u
        @f_lower = f_u
      else
        @x_upper = u
        @f_upper = f_u
      end
      if f_u <= f_w or w == z
        @v = w
        @f_v = f_w
        @w = u
        @f_w = f_u
      elsif f_u <= f_v or v == z or v == w
        @v = u
        @f_v = f_u
      end
      true
    end
  end
end
# = Bisection Method for Minimization.
# See Unidimensional for methods.
# == Usage.
# require 'minimization'
# min=Minimization::Bisection.new(1,2 , proc {|x| (x)**3-(x)-2}
# min.iterate
# min.x_minimum
# min.f_minimum
# min.log
# Source:
# * R.L. Burden, J. Faires: Numerical Analysis
class Bisection < Unidimensional
def iterate()
ax = @lower
cx = @upper
k = 0;
while (ax-cx).abs > @epsilon and k<@max_iteration
bx = (ax + cx).quo(2);
fa = f(ax);
fb = f(bx);
fc = f(cx);
if (fa*fb <0)
cx = bx;
else (fb*fc <0)
ax = bx;
end
k +=1;
@log << [k, ax.to_f, cx.to_f, f(ax).to_f, f(cx).to_f, (ax-cx).abs.to_f, (f(ax)-f(cx)).abs.to_f]
end
if (fa