---- Statistics-LineFit/Changes ----

Revision history for Perl module Statistics::LineFit

0.01 Sat Nov 15 09:42:50 2003
    Original version; created by ExtUtils::ModuleMaker 0.32

0.01 Mon Nov 17 15:27:57 PST 2003
    Release to CPAN.

0.02 Tue Nov 18 12:22:45 PST 2003
    Fixed format bug in POD documentation.

0.03 Sun Nov 24 15:25:36 PST 2003
    Changed epsilon tolerance in test 14_scale_weights from 1.0e-11 to
    1.0e-10 to make the test pass on MSWin32-x86-multi-thread.  Modified
    tests 02_R5ptsU1-D through 08_3ptsU1-D to run cleanly with "use
    strict".  All test files (*.t) are now "use strict" clean and -w
    clean.  Minor revisions to documentation.

0.04 Mon Jan 5 23:05:15 PST 2004
    No significant changes.  Minor revisions to documentation and code.

0.05 Tue May 18 13:39:13 PST 2004
    No significant changes.  Minor revisions to documentation.

0.06 Tue May 25 10:54:04 PST 2004
    Added check to verify that at least two of the weights are nonzero.
    Minor revisions to documentation.

0.07 Wed Sep 2 09:09:36 PST 2004
    Added varianceOfEstimates() method to compute the variances of the
    estimates of the intercept and slope.

---- Statistics-LineFit/INSTALL ----

If the perl CPAN module is installed and working, you can install
Statistics::LineFit using:

    perl -MCPAN -e "install Statistics::LineFit"

Otherwise, download the tar file from CPAN (see search.cpan.org) and
proceed as follows:

UNIX / LINUX / CYGWIN

Unbundle the tar file using a command like:

    gunzip -c Statistics-LineFit-0.01.tar.gz | tar xvf -

Then cd to the Statistics-LineFit-0.01 directory and do:

    perl Makefile.PL
    make
    make test
    make install

If you want to install the module in a directory other than the default
location, use something like:

    perl Makefile.PL PREFIX=/home/mydir/perl LIB=/home/mydir/perl/lib

WINDOWS

The above commands will also work from within a Cygwin window on Windows.
Otherwise, unbundle the tar file using a freeware archive program like
PowerArchiver and use 'nmake' rather than 'make'.

MACINTOSH, OS 370, VMS, etc.

If Perl is installed you will be able to install this module, but I'm not
sure of the exact commands needed.  The installation steps will be similar
to the above sequence.
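If you want a quick check that Perl can find and load the installed
module, a one-liner such as:

    perl -MStatistics::LineFit -e "print qq{ok\n}"

should print "ok" and exit without errors.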
---- Statistics-LineFit/lib/Statistics/LineFit.pm ----

package Statistics::LineFit;
use strict;
use Carp qw(carp);
BEGIN {
    use Exporter ();
    use vars qw ($AUTHOR $VERSION @ISA @EXPORT @EXPORT_OK %EXPORT_TAGS);
    $AUTHOR = 'Richard Anderson <cpan(AT)richardanderson(DOT)org>';
    @EXPORT = @EXPORT_OK = qw();
    %EXPORT_TAGS = ();
    @ISA = qw(Exporter);
    $VERSION = 0.07;
}

sub new {
#
# Purpose: Create a new Statistics::LineFit object
#
    my ($caller, $validate, $hush) = @_;
    my $self = {
        doneRegress => 0,
        gotData     => 0,
        hush        => defined $hush ? $hush : 0,
        validate    => defined $validate ? $validate : 0,
    };
    bless $self, ref($caller) || $caller;
    return $self;
}

sub coefficients {
#
# Purpose: Return the slope and intercept from least squares line fit
#
    my $self = shift;
    unless (defined $self->{intercept} and defined $self->{slope}) {
        $self->regress() or return;
    }
    return ($self->{intercept}, $self->{slope});
}

sub computeSums {
#
# Purpose: Compute sum of x, y, x**2, y**2 and x*y (private method)
#
    my $self = shift;
    my ($sumX, $sumY, $sumXX, $sumYY, $sumXY) = (0, 0, 0, 0, 0);
    if (defined $self->{weight}) {
        for (my $i = 0; $i < $self->{numXY}; ++$i) {
            $sumX += $self->{weight}[$i] * $self->{x}[$i];
            $sumY += $self->{weight}[$i] * $self->{y}[$i];
            $sumXX += $self->{weight}[$i] * $self->{x}[$i] ** 2;
            $sumYY += $self->{weight}[$i] * $self->{y}[$i] ** 2;
            $sumXY += $self->{weight}[$i] * $self->{x}[$i] * $self->{y}[$i];
        }
    } else {
        for (my $i = 0; $i < $self->{numXY}; ++$i) {
            $sumX += $self->{x}[$i];
            $sumY += $self->{y}[$i];
            $sumXX += $self->{x}[$i] ** 2;
            $sumYY += $self->{y}[$i] ** 2;
            $sumXY += $self->{x}[$i] * $self->{y}[$i];
        }
    }
    return ($sumX, $sumY, $sumXX, $sumYY, $sumXY);
}
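# Note: regress() (below) converts these raw sums into sums of squared
# deviations via the algebraic identities
#
#     SUM((x[i] - meanX) ** 2)             = sumXX - sumX ** 2 / n
#     SUM((x[i] - meanX) * (y[i] - meanY)) = sumXY - sumX * sumY / n
#
# (where n = numXY), which is how the module evaluates the textbook
# formulas in the POD's ALGORITHM section in a single O(N) pass without
# first computing the means.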
sub durbinWatson {
#
# Purpose: Return the Durbin-Watson statistic
#
    my $self = shift;
    unless (defined $self->{durbinWatson}) {
        $self->regress() or return;
        my $sumErrDiff = 0;
        my $errorTMinus1 = $self->{y}[0] - ($self->{intercept}
            + $self->{slope} * $self->{x}[0]);
        for (my $i = 1; $i < $self->{numXY}; ++$i) {
            my $error = $self->{y}[$i] - ($self->{intercept}
                + $self->{slope} * $self->{x}[$i]);
            $sumErrDiff += ($error - $errorTMinus1) ** 2;
            $errorTMinus1 = $error;
        }
        $self->{durbinWatson} = $self->sumSqErrors() > 0 ?
            $sumErrDiff / $self->sumSqErrors() : 0;
    }
    return $self->{durbinWatson};
}

sub meanSqError {
#
# Purpose: Return the mean squared error
#
    my $self = shift;
    unless (defined $self->{meanSqError}) {
        $self->regress() or return;
        $self->{meanSqError} = $self->sumSqErrors() / $self->{numXY};
    }
    return $self->{meanSqError};
}

sub predictedYs {
#
# Purpose: Return the predicted y values
#
    my $self = shift;
    unless (defined $self->{predictedYs}) {
        $self->regress() or return;
        $self->{predictedYs} = [];
        for (my $i = 0; $i < $self->{numXY}; ++$i) {
            $self->{predictedYs}[$i] = $self->{intercept}
                + $self->{slope} * $self->{x}[$i];
        }
    }
    return @{$self->{predictedYs}};
}

sub regress {
#
# Purpose: Do weighted or unweighted least squares 2-D line fit (if needed)
#
# Description:
# The equations below apply to both the weighted and unweighted fit: the
# weights are normalized in setWeights(), so the sum of the weights is
# equal to numXY.
#
    my $self = shift;
    return $self->{regressOK} if $self->{doneRegress};
    unless ($self->{gotData}) {
        carp "No valid data input - can't do regression" unless $self->{hush};
        return 0;
    }
    my ($sumX, $sumY, $sumYY, $sumXY);
    ($sumX, $sumY, $self->{sumXX}, $sumYY, $sumXY) = $self->computeSums();
    $self->{sumSqDevX} = $self->{sumXX} - $sumX ** 2 / $self->{numXY};
    if ($self->{sumSqDevX} != 0) {
        $self->{sumSqDevY} = $sumYY - $sumY ** 2 / $self->{numXY};
        $self->{sumSqDevXY} = $sumXY - $sumX * $sumY / $self->{numXY};
        $self->{slope} = $self->{sumSqDevXY} / $self->{sumSqDevX};
        $self->{intercept} = ($sumY - $self->{slope} * $sumX) / $self->{numXY};
        $self->{regressOK} = 1;
    } else {
        carp "Can't fit line when x values are all equal" unless $self->{hush};
        $self->{sumXX} = $self->{sumSqDevX} = undef;
        $self->{regressOK} = 0;
    }
    $self->{doneRegress} = 1;
    return $self->{regressOK};
}

sub residuals {
#
# Purpose: Return the observed Y values minus the predicted Y values
#
    my $self = shift;
    unless (defined $self->{residuals}) {
        $self->regress() or return;
        $self->{residuals} = [];
        for (my $i = 0; $i < $self->{numXY}; ++$i) {
            $self->{residuals}[$i] = $self->{y}[$i] - ($self->{intercept}
                + $self->{slope} * $self->{x}[$i]);
        }
    }
    return @{$self->{residuals}};
}

sub rSquared {
#
# Purpose: Return the square of the correlation coefficient
#
    my $self = shift;
    unless (defined $self->{rSquared}) {
        $self->regress() or return;
        my $denom = $self->{sumSqDevX} * $self->{sumSqDevY};
        $self->{rSquared} = $denom != 0 ? $self->{sumSqDevXY} ** 2 / $denom : 1;
    }
    return $self->{rSquared};
}

sub setData {
#
# Purpose: Initialize (x,y) values and optional weights
#
    my ($self, $x, $y, $weights) = @_;
    $self->{doneRegress} = 0;
    $self->{x} = $self->{y} = $self->{numXY} = $self->{weight}
        = $self->{intercept} = $self->{slope} = $self->{rSquared}
        = $self->{sigma} = $self->{durbinWatson} = $self->{meanSqError}
        = $self->{sumSqErrors} = $self->{tStatInt} = $self->{tStatSlope}
        = $self->{predictedYs} = $self->{residuals} = $self->{sumXX}
        = $self->{sumSqDevX} = $self->{sumSqDevY} = $self->{sumSqDevXY}
        = undef;
    if (@$x < 2) {
        carp "Must input more than one data point!" unless $self->{hush};
        return 0;
    }
    $self->{numXY} = @$x;
    if (ref $x->[0]) {
        $self->setWeights($y) or return 0;
        $self->{x} = [ ];
        $self->{y} = [ ];
        foreach my $xy (@$x) {
            push @{$self->{x}}, $xy->[0];
            push @{$self->{y}}, $xy->[1];
        }
    } else {
        if (@$x != @$y) {
            carp "Length of x and y arrays must be equal!" unless $self->{hush};
            return 0;
        }
        $self->setWeights($weights) or return 0;
        $self->{x} = [ @$x ];
        $self->{y} = [ @$y ];
    }
    if ($self->{validate}) {
        unless ($self->validData()) {
            $self->{x} = $self->{y} = $self->{weight} = $self->{numXY} = undef;
            return 0;
        }
    }
    $self->{gotData} = 1;
    return 1;
}

sub setWeights {
#
# Purpose: Normalize and initialize line fit weighting factors
# (private method)
#
    my ($self, $weights) = @_;
    return 1 unless defined $weights;
    if (@$weights != $self->{numXY}) {
        carp "Length of weight array must equal length of data array!"
            unless $self->{hush};
        return 0;
    }
    if ($self->{validate}) { $self->validWeights($weights) or return 0 }
    my $sumW = my $numNonZero = 0;
    foreach my $weight (@$weights) {
        if ($weight < 0) {
            carp "Weights must be non-negative numbers!" unless $self->{hush};
            return 0;
        }
        $sumW += $weight;
        if ($weight != 0) { ++$numNonZero }
    }
    if ($numNonZero < 2) {
        carp "At least two weights must be nonzero!" unless $self->{hush};
        return 0;
    }
    # Copy the weights before normalizing, so that the caller's array is
    # not modified (the documentation promises the input values are copied)
    my @normWeights = @$weights;
    my $factor = @normWeights / $sumW;
    foreach my $weight (@normWeights) { $weight *= $factor }
    $self->{weight} = [ @normWeights ];
    return 1;
}

sub sigma {
#
# Purpose: Return the estimated homoscedastic standard deviation of the
# error term
#
    my $self = shift;
    unless (defined $self->{sigma}) {
        $self->regress() or return;
        $self->{sigma} = $self->{numXY} > 2 ?
            sqrt($self->sumSqErrors() / ($self->{numXY} - 2)) : 0;
    }
    return $self->{sigma};
}

sub sumSqErrors {
#
# Purpose: Return the sum of the squared errors (private method)
#
    my $self = shift;
    unless (defined $self->{sumSqErrors}) {
        $self->regress() or return;
        $self->{sumSqErrors} = $self->{sumSqDevY}
            - $self->{sumSqDevX} * $self->{slope} ** 2;
        if ($self->{sumSqErrors} < 0) { $self->{sumSqErrors} = 0 }
    }
    return $self->{sumSqErrors};
}

sub tStatistics {
#
# Purpose: Return the T statistics
#
    my $self = shift;
    unless (defined $self->{tStatInt} and defined $self->{tStatSlope}) {
        $self->regress() or return;
        my $biasEstimateInt = $self->sigma() * sqrt($self->{sumXX}
            / ($self->{sumSqDevX} * $self->{numXY}));
        $self->{tStatInt} = $biasEstimateInt != 0 ?
            $self->{intercept} / $biasEstimateInt : 0;
        my $biasEstimateSlope = $self->sigma() / sqrt($self->{sumSqDevX});
        $self->{tStatSlope} = $biasEstimateSlope != 0 ?
            $self->{slope} / $biasEstimateSlope : 0;
    }
    return ($self->{tStatInt}, $self->{tStatSlope});
}

sub validData {
#
# Purpose: Verify that the input x-y data are numeric (private method)
#
    my $self = shift;
    for (my $i = 0; $i < $self->{numXY}; ++$i) {
        if (not defined $self->{x}[$i]) {
            carp "Input x[$i] is not defined" unless $self->{hush};
            return 0;
        }
        if ($self->{x}[$i] !~ /^([+-]?)(?=\d|\.\d)\d*(\.\d*)?([Ee]([+-]?\d+))?$/) {
            carp "Input x[$i] is not a number: $self->{x}[$i]"
                unless $self->{hush};
            return 0;
        }
        if (not defined $self->{y}[$i]) {
            carp "Input y[$i] is not defined" unless $self->{hush};
            return 0;
        }
        if ($self->{y}[$i] !~ /^([+-]?)(?=\d|\.\d)\d*(\.\d*)?([Ee]([+-]?\d+))?$/) {
            carp "Input y[$i] is not a number: $self->{y}[$i]"
                unless $self->{hush};
            return 0;
        }
    }
    return 1;
}

sub validWeights {
#
# Purpose: Verify that the input weights are numeric (private method)
#
    my ($self, $weights) = @_;
    for (my $i = 0; $i < @$weights; ++$i) {
        if (not defined $weights->[$i]) {
            carp "Input weights[$i] is not defined" unless $self->{hush};
            return 0;
        }
        if ($weights->[$i] !~ /^([+-]?)(?=\d|\.\d)\d*(\.\d*)?([Ee]([+-]?\d+))?$/) {
            carp "Input weights[$i] is not a number: $weights->[$i]"
                unless $self->{hush};
            return 0;
        }
    }
    return 1;
}

sub varianceOfEstimates {
#
# Purpose: Return the variances in the estimates of the intercept and slope
#
    my $self = shift;
    unless (defined $self->{intercept} and defined $self->{slope}) {
        $self->regress() or return;
    }
    my @predictedYs = $self->predictedYs();
    my ($s, $sx, $sxx) = (0, 0, 0);
    if (defined $self->{weight}) {
        for (my $i = 0; $i < $self->{numXY}; ++$i) {
            my $variance = ($predictedYs[$i] - $self->{y}[$i]) ** 2;
            next if 0 == $variance;
            $s += 1.0 / $variance;
            $sx += $self->{weight}[$i] * $self->{x}[$i] / $variance;
            $sxx += $self->{weight}[$i] * $self->{x}[$i] ** 2 / $variance;
        }
    } else {
        for (my $i = 0; $i < $self->{numXY}; ++$i) {
            my $variance = ($predictedYs[$i] - $self->{y}[$i]) ** 2;
            next if 0 == $variance;
            $s += 1.0 / $variance;
            $sx += $self->{x}[$i] / $variance;
            $sxx += $self->{x}[$i] ** 2 / $variance;
        }
    }
    my $denominator = ($s * $sxx - $sx ** 2);
    if (0 == $denominator) {
        return;
    } else {
        return ($sxx / $denominator, $s / $denominator);
    }
}

1;

__END__
=head1 NAME

Statistics::LineFit - Least squares line fit, weighted or unweighted

=head1 SYNOPSIS

 use Statistics::LineFit;
 $lineFit = Statistics::LineFit->new();
 $lineFit->setData (\@xValues, \@yValues) or die "Invalid data";
 ($intercept, $slope) = $lineFit->coefficients();
 defined $intercept or die "Can't fit line if x values are all equal";
 $rSquared = $lineFit->rSquared();
 $meanSquaredError = $lineFit->meanSqError();
 $durbinWatson = $lineFit->durbinWatson();
 $sigma = $lineFit->sigma();
 ($tStatIntercept, $tStatSlope) = $lineFit->tStatistics();
 @predictedYs = $lineFit->predictedYs();
 @residuals = $lineFit->residuals();
 ($varianceIntercept, $varianceSlope) = $lineFit->varianceOfEstimates();

=head1 DESCRIPTION

The Statistics::LineFit module does weighted or unweighted least-squares
line fitting to two-dimensional data (y = a + b * x).  (This is also called
linear regression.)  In addition to the slope and y-intercept, the module
can return the square of the correlation coefficient (R squared), the
Durbin-Watson statistic, the mean squared error, sigma, the t statistics,
the variance of the estimates of the slope and y-intercept, the predicted
y values and the residuals of the y values.  (See the METHODS section for
a description of these statistics.)

The module accepts input data in separate x and y arrays or a single 2-D
array (an array of arrayrefs).  The optional weights are input in a
separate array.  The module can optionally verify that the input data and
weights are valid numbers.  If weights are input, the line fit minimizes
the weighted sum of the squared errors and the following statistics are
weighted: the correlation coefficient, the Durbin-Watson statistic, the
mean squared error, sigma and the t statistics.

The module is state-oriented and caches its results.  Once you call the
setData() method, you can call the other methods in any order or call a
method several times without invoking redundant calculations.  After
calling setData(), you can modify the input data or weights without
affecting the module's results.

The decision to use or not use weighting could be made using your a priori
knowledge of the data or using supplemental data.  If the data is sparse or
contains non-random noise, weighting can degrade the solution.  Weighting
is a good option if some points are suspect or less relevant (e.g., older
terms in a time series, points that are known to have more noise).

=head1 ALGORITHM

The least-square line is the line that minimizes the sum of the squares
of the y residuals:

 Minimize SUM((y[i] - (a + b * x[i])) ** 2)

Setting the partial derivatives with respect to a and b to zero yields a
solution that can be expressed in terms of the means, variances and
covariances of x and y:

 b = SUM((x[i] - meanX) * (y[i] - meanY)) / SUM((x[i] - meanX) ** 2)

 a = meanY - b * meanX

Note that a and b are undefined if all the x values are the same.

If you use weights, each term in the above sums is multiplied by the value
of the weight for that index.  The program normalizes the weights (after
copying the input values) so that the sum of the weights equals the number
of points.  This minimizes the differences between the weighted and
unweighted equations.

Statistics::LineFit uses equations that are mathematically equivalent to
the above equations and computationally more efficient.  The module runs
in O(N) (linear time).
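
As an illustration (this is not the module's actual implementation, which
uses one-pass sums), the textbook formulas above translate directly into
Perl:

 # Naive O(N) least-squares fit from the formulas above; assumes @x
 # and @y are equal-length numeric arrays with at least two distinct
 # x values.
 my $n = @x;
 my ($meanX, $meanY) = (0, 0);
 $meanX += $_ / $n for @x;
 $meanY += $_ / $n for @y;
 my ($covXY, $varX) = (0, 0);
 for my $i (0 .. $n - 1) {
     $covXY += ($x[$i] - $meanX) * ($y[$i] - $meanY);
     $varX  += ($x[$i] - $meanX) ** 2;
 }
 my $b = $covXY / $varX;          # slope
 my $a = $meanY - $b * $meanX;    # y-intercept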

=head1 LIMITATIONS

The regression fails if the input x values are all equal or the only
unequal x values have zero weights.  This is an inherent limit to fitting a
line of the form y = a + b * x.  In this case, the module issues an error
message and methods that return statistical values will return undefined
values.  You can also use the return value of the regress() method to check
the status of the regression.

As the sum of the squared deviations of the x values approaches zero, the
module's results become sensitive to the precision of floating point
operations on the host system.

If the x values are not all the same and the apparent "best fit" line is
vertical, the module will fit a horizontal line.  For example, an input of
(1, 1), (1, 7), (2, 3), (2, 5) returns a slope of zero, an intercept of 4
and an R squared of zero.  This is correct behavior because this line is
the best least-squares fit to the data for the given parameterization
(y = a + b * x).

On a 32-bit system the results are accurate to about 11 significant digits,
depending on the input data.  Many of the installation tests will fail on a
system with word lengths of 16 bits or fewer.  (You might want to upgrade
your old 80286 IBM PC.)

=head1 EXAMPLES

=head2 Alternate calling sequence:

 use Statistics::LineFit;
 $lineFit = Statistics::LineFit->new();
 $lineFit->setData(\@x, \@y) or die "Invalid regression data\n";
 if (defined $lineFit->rSquared()
     and $lineFit->rSquared() > $threshold)
 {
     ($intercept, $slope) = $lineFit->coefficients();
     print "Slope: $slope  Y-intercept: $intercept\n";
 }

=head2 Multiple calls with same object, validate input, suppress error
messages:

 use Statistics::LineFit;
 $lineFit = Statistics::LineFit->new(1, 1);
 while (1) {
     @xy = read2Dxy();  # User-supplied subroutine
     $lineFit->setData(\@xy);
     ($intercept, $slope) = $lineFit->coefficients();
     if (defined $intercept) {
         print "Slope: $slope  Y-intercept: $intercept\n";
     }
 }
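
=head2 Weighted fit (the weight values shown are illustrative only):

 use Statistics::LineFit;
 $lineFit = Statistics::LineFit->new();
 # Give the two most recent points twice the weight of the others;
 # only the relative sizes of the weights are significant.
 @weights = (1, 1, 1, 2, 2);
 $lineFit->setData(\@x, \@y, \@weights) or die "Invalid regression data";
 ($intercept, $slope) = $lineFit->coefficients();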

=head1 METHODS

The module is state-oriented and caches its results.  Once you call the
setData() method, you can call the other methods in any order or call a
method several times without invoking redundant calculations.

The regression fails if the x values are all the same.  In this case, the
module issues an error message and methods that return statistical values
will return undefined values.  You can also use the return value of the
regress() method to check the status of the regression.

=head2 new() - create a new Statistics::LineFit object

 $lineFit = Statistics::LineFit->new();
 $lineFit = Statistics::LineFit->new($validate);
 $lineFit = Statistics::LineFit->new($validate, $hush);

 $validate = 1 -> Verify input data is numeric (slower execution)
             0 -> Don't verify input data (default, faster execution)
 $hush = 1 -> Suppress error messages
       = 0 -> Enable error messages (default)

=head2 coefficients() - Return the slope and y intercept

 ($intercept, $slope) = $lineFit->coefficients();

The returned list is undefined if the regression fails.

=head2 durbinWatson() - Return the Durbin-Watson statistic

 $durbinWatson = $lineFit->durbinWatson();

The Durbin-Watson test is a test for first-order autocorrelation in the
residuals of a time series regression.  The Durbin-Watson statistic has a
range of 0 to 4; a value of 2 indicates there is no autocorrelation.

The return value is undefined if the regression fails.  If weights are
input, the return value is the weighted Durbin-Watson statistic.

=head2 meanSqError() - Return the mean squared error

 $meanSquaredError = $lineFit->meanSqError();

The return value is undefined if the regression fails.  If weights are
input, the return value is the weighted mean squared error.

=head2 predictedYs() - Return the predicted y values

 @predictedYs = $lineFit->predictedYs();

The returned list is undefined if the regression fails.

=head2 regress() - Do the least squares line fit (if not already done)

 $lineFit->regress() or die "Regression failed";

You don't need to call this method because it is invoked by the other
methods as needed.  After you call setData(), you can call regress() at
any time to get the status of the regression for the current data.

=head2 residuals() - Return the input y values minus the predicted y values

 @residuals = $lineFit->residuals();

The returned list is undefined if the regression fails.

=head2 rSquared() - Return the square of the correlation coefficient

 $rSquared = $lineFit->rSquared();

R squared, also called the square of the Pearson product-moment correlation
coefficient, is a measure of goodness-of-fit.  It is the fraction of the
variation in Y that can be attributed to the variation in X.  A perfect fit
will have an R squared of 1; fitting a line to the vertices of a regular
polygon will yield an R squared of zero.  Graphical displays of data with
an R squared of less than about 0.1 do not show a visible linear trend.

The return value is undefined if the regression fails.  If weights are
input, the return value is the weighted correlation coefficient.

=head2 setData() - Initialize (x,y) values and optional weights

 $lineFit->setData(\@x, \@y) or die "Invalid regression data";
 $lineFit->setData(\@x, \@y, \@weights) or die "Invalid regression data";
 $lineFit->setData(\@xy) or die "Invalid regression data";
 $lineFit->setData(\@xy, \@weights) or die "Invalid regression data";

@xy is an array of arrayrefs; x values are $xy[$i][0], y values are
$xy[$i][1].  (The module does not access any indices greater than
$xy[$i][1], so the arrayrefs can point to arrays that are longer than two
elements.)  The method distinguishes the first two calling signatures from
the last two by examining the first argument.

The optional weights array must be the same length as the data array(s).
The weights must be non-negative numbers; at least two of the weights must
be nonzero.  Only the relative size of the weights is significant: the
program normalizes the weights (after copying the input values) so that the
sum of the weights equals the number of points.  If you want to do multiple
line fits using the same weights, the weights must be passed to each call
to setData().

The method will return zero if the array lengths don't match, there are
fewer than two data points, any weights are negative or fewer than two of
the weights are nonzero.  If the new() method was called with validate = 1,
the method will also verify that the data and weights are valid numbers.

Once you successfully call setData(), the next call to any method other
than new() or setData() invokes the regression.  You can modify the input
data or weights after calling setData() without affecting the module's
results.
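
For example, the 2-D calling signature might be used as follows (the data
values here are invented for illustration):

 @xy = ([1, 1.2], [2, 1.9], [3, 3.1], [4, 4.0]);
 $lineFit->setData(\@xy) or die "Invalid regression data";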

=head2 sigma() - Return the standard error of the estimate

 $sigma = $lineFit->sigma();

Sigma is an estimate of the homoscedastic standard deviation of the error.
Sigma is also known as the standard error of the estimate.

The return value is undefined if the regression fails.  If weights are
input, the return value is the weighted standard error.

=head2 tStatistics() - Return the t statistics

 ($tStatIntercept, $tStatSlope) = $lineFit->tStatistics();

The t statistic, also called the t ratio or Wald statistic, is used to
accept or reject a hypothesis using a table of cutoff values computed from
the t distribution.  The t-statistic suggests that the estimated value is
(reasonable, too small, too large) when the t-statistic is (close to zero,
large and positive, large and negative).

The returned list is undefined if the regression fails.  If weights are
input, the returned values are the weighted t statistics.

=head2 varianceOfEstimates() - Return variances of estimates of intercept,
slope

 ($varianceIntercept, $varianceSlope) = $lineFit->varianceOfEstimates();

Assuming the data are noisy or inaccurate, the intercept and slope returned
by the coefficients() method are only estimates of the true intercept and
slope.  The varianceOfEstimates() method returns the variances of the
estimates of the intercept and slope, respectively.  See Numerical Recipes
in C, section 15.2 (Fitting Data to a Straight Line), equation 15.2.9.

The returned list is undefined if the regression fails.  If weights are
input, the returned values are the weighted variances.
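
For example, since a standard error is the square root of a variance, the
standard errors of the two estimates can be derived like this:

 if (my ($varInt, $varSlope) = $lineFit->varianceOfEstimates()) {
     $stdErrIntercept = sqrt($varInt);
     $stdErrSlope = sqrt($varSlope);
 }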

=head1 SEE ALSO

 Mendenhall, W., and Sincich, T.L., 2003, A Second Course in Statistics:
  Regression Analysis, 6th ed., Prentice Hall.
 Press, W. H., Flannery, B. P., Teukolsky, S. A., Vetterling, W. T., 1992,
  Numerical Recipes in C : The Art of Scientific Computing, 2nd ed.,
  Cambridge University Press.

The man page for perl(1).  The CPAN modules Statistics::OLS,
Statistics::GaussHelmert and Statistics::Regression.

Statistics::LineFit is simpler to use than Statistics::GaussHelmert or
Statistics::Regression.  Statistics::LineFit was inspired by and borrows
some ideas from the venerable Statistics::OLS module.  The significant
differences between Statistics::LineFit and Statistics::OLS (version 0.07)
are:

=over 4

=item B<Statistics::LineFit is more robust.>

Statistics::OLS returns incorrect results for certain input datasets.
Statistics::OLS does not deep copy its input arrays, which can lead to
subtle bugs.  The Statistics::OLS installation test has only one test and
does not verify that the regression returns correct results.  In contrast,
Statistics::LineFit has over 200 installation tests that use various
datasets/calling sequences to verify the accuracy of the regression to
within 1.0e-10.

=item B<Statistics::LineFit is faster.>

For a sequence of calls to new(), setData(\@x, \@y) and regress(),
Statistics::LineFit is faster than Statistics::OLS by factors of 2.0, 1.6
and 2.4 for array lengths of 5, 100 and 10000, respectively.

=item B<Statistics::LineFit can do weighted or unweighted regression.>

Statistics::OLS lacks this option.

=item B<Statistics::LineFit has a better interface.>

Once you call the Statistics::LineFit::setData() method, you can call the
other methods in any order and call methods multiple times without invoking
redundant calculations.  Statistics::LineFit lets you enable or disable
data verification or error messages.

=item B<Statistics::LineFit has better code and documentation.>

The code in Statistics::LineFit is more readable, more object oriented and
more compliant with Perl coding standards than the code in Statistics::OLS.
The documentation for Statistics::LineFit is more detailed and complete.

=back

=head1 AUTHOR

Richard Anderson, cpan(AT)richardanderson(DOT)org,
http://www.richardanderson.org

=head1 LICENSE

This program is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.  The full text of the license can be
found in the LICENSE file included in the distribution and available in the
CPAN listing for Statistics::LineFit (see www.cpan.org or search.cpan.org).

=head1 DISCLAIMER

To the maximum extent permitted by applicable law, the author of this
module disclaims all warranties, either express or implied, including but
not limited to implied warranties of merchantability and fitness for a
particular purpose, with regard to the software and the accompanying
documentation.

=cut

---- Statistics-LineFit/LICENSE ----

Terms of Perl itself

a) the GNU General Public License as published by the Free Software
   Foundation; either version 1, or (at your option) any later version, or
b) the "Artistic License"

---------------------------------------------------------------------------

The General Public License (GPL)
Version 2, June 1991

Copyright (C) 1989, 1991 Free Software Foundation, Inc.  675 Mass Ave,
Cambridge, MA 02139, USA.  Everyone is permitted to copy and distribute
verbatim copies of this license document, but changing it is not allowed.

Preamble

The licenses for most software are designed to take away your freedom to
share and change it.  By contrast, the GNU General Public License is
intended to guarantee your freedom to share and change free software--to
make sure the software is free for all its users.  This General Public
License applies to most of the Free Software Foundation's software and to
any other program whose authors commit to using it.  (Some other Free
Software Foundation software is covered by the GNU Library General Public
License instead.)  You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price.
Our General Public Licenses are designed to make sure that you have the
freedom to distribute copies of free software (and charge for this service
if you wish), that you receive source code or can get it if you want it,
that you can change the software or use pieces of it in new free programs;
and that you know you can do these things.

To protect your rights, we need to make restrictions that forbid anyone to
deny you these rights or to ask you to surrender the rights.  These
restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.

For example, if you distribute copies of such a program, whether gratis or
for a fee, you must give the recipients all the rights that you have.  You
must make sure that they, too, receive or can get the source code.  And you
must show them these terms so they know their rights.

We protect your rights with two steps: (1) copyright the software, and (2)
offer you this license which gives you legal permission to copy, distribute
and/or modify the software.

Also, for each author's protection and ours, we want to make certain that
everyone understands that there is no warranty for this free software.  If
the software is modified by someone else and passed on, we want its
recipients to know that what they have is not the original, so that any
problems introduced by others will not reflect on the original authors'
reputations.

Finally, any free program is threatened constantly by software patents.  We
wish to avoid the danger that redistributors of a free program will
individually obtain patent licenses, in effect making the program
proprietary.  To prevent this, we have made it clear that any patent must
be licensed for everyone's free use or not licensed at all.

The precise terms and conditions for copying, distribution and modification
follow.

GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0.
This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. 
Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. 
For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS --------------------------------------------------------------------------- The Artistic License Preamble The intent of this document is to state the conditions under which a Package may be copied, such that the Copyright Holder maintains some semblance of artistic control over the development of the package, while giving the users of the package the right to use and distribute the Package in a more-or-less customary fashion, plus the right to make reasonable modifications. Definitions: - "Package" refers to the collection of files distributed by the Copyright Holder, and derivatives of that collection of files created through textual modification. - "Standard Version" refers to such a Package if it has not been modified, or has been modified in accordance with the wishes of the Copyright Holder. - "Copyright Holder" is whoever is named in the copyright or copyrights for the package. - "You" is you, if you're thinking about copying or distributing this Package. - "Reasonable copying fee" is whatever you can justify on the basis of media cost, duplication charges, time of people involved, and so on. (You will not be required to justify it to the Copyright Holder, but only to the computing community at large as a market that must bear the fee.) - "Freely Available" means that no fee is charged for the item itself, though there may be fees involved in handling the item. It also means that recipients of the item may redistribute it under the same conditions they received it. 1. You may make and give away verbatim copies of the source form of the Standard Version of this Package without restriction, provided that you duplicate all of the original copyright notices and associated disclaimers. 2. You may apply bug fixes, portability fixes and other modifications derived from the Public Domain or from the Copyright Holder. A Package modified in such a way shall still be considered the Standard Version. 3. 
You may otherwise modify your copy of this Package in any way, provided
that you insert a prominent notice in each changed file stating how and
when you changed that file, and provided that you do at least ONE of the
following:

a) place your modifications in the Public Domain or otherwise make them
Freely Available, such as by posting said modifications to Usenet or an
equivalent medium, or placing the modifications on a major archive site
such as ftp.uu.net, or by allowing the Copyright Holder to include your
modifications in the Standard Version of the Package.

b) use the modified Package only within your corporation or organization.

c) rename any non-standard executables so the names do not conflict with
standard executables, which must also be provided, and provide a separate
manual page for each non-standard executable that clearly documents how it
differs from the Standard Version.

d) make other distribution arrangements with the Copyright Holder.

4. You may distribute the programs of this Package in object code or
executable form, provided that you do at least ONE of the following:

a) distribute a Standard Version of the executables and library files,
together with instructions (in the manual page or equivalent) on where to
get the Standard Version.

b) accompany the distribution with the machine-readable source of the
Package with your modifications.

c) accompany any non-standard executables with their corresponding Standard
Version executables, giving the non-standard executables non-standard
names, and clearly documenting the differences in manual pages (or
equivalent), together with instructions on where to get the Standard
Version.

d) make other distribution arrangements with the Copyright Holder.

5. You may charge a reasonable copying fee for any distribution of this
Package.  You may charge any fee you choose for support of this Package.
You may not charge a fee for this Package itself.  However, you may
distribute this Package in aggregate with other (possibly commercial)
programs as part of a larger (possibly commercial) software distribution
provided that you do not advertise this Package as a product of your own.

6. The scripts and library files supplied as input to or produced as output
from the programs of this Package do not automatically fall under the
copyright of this Package, but belong to whomever generated them, and may
be sold commercially, and may be aggregated with this Package.

7. C or perl subroutines supplied by you and linked into this Package shall
not be considered part of this Package.

8. Aggregation of this Package with a commercial distribution is always
permitted provided that the use of this Package is embedded; that is, when
no overt attempt is made to make this Package's interfaces visible to the
end user of the commercial distribution.  Such use shall not be construed
as a distribution of this Package.

9. The name of the Copyright Holder may not be used to endorse or promote
products derived from this software without specific prior written
permission.

10. THIS PACKAGE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.

The End

---- Statistics-LineFit/Makefile.PL ----

use ExtUtils::MakeMaker;
# See lib/ExtUtils/MakeMaker.pm for details of how to influence
# the contents of the Makefile that is written.
WriteMakefile(
    NAME         => 'Statistics::LineFit',
    VERSION_FROM => 'lib/Statistics/LineFit.pm',  # finds $VERSION
    AUTHOR       => 'Richard Anderson (cpan(AT)richardanderson(DOT)org)',
    ABSTRACT     => '',
    PREREQ_PM    => {
                     'Test::Simple' => 0.44,
                    },
);

---- Statistics-LineFit/MANIFEST ----

MANIFEST
INSTALL
LICENSE
README
Todo
Changes
lib/Statistics/LineFit.pm
Makefile.PL
t/01_load.t
t/02_R5ptsU1-D.t
t/03_R5ptsW1-D.t
t/04_4ptsU1-D.t
t/05_4ptsU2-D.t
t/06_4ptsW1-D.t
t/07_4ptsW2-D.t
t/08_3ptsU1-D.t
t/09_2ptsU1-D.t
t/10_bad_data.t
t/11_no_data.t
t/12_idempotent.t
t/13_long_array.t
t/14_scale_weights.t

---- Statistics-LineFit/README ----

See the file INSTALL for installation instructions.

Contents:
NAME
SYNOPSIS
DESCRIPTION
ALGORITHM
LIMITATIONS
EXAMPLES
METHODS
SEE ALSO
AUTHOR
LICENSE
DISCLAIMER

NAME
    Statistics::LineFit - Least squares line fit, weighted or unweighted

SYNOPSIS
      use Statistics::LineFit;
      $lineFit = Statistics::LineFit->new();
      $lineFit->setData (\@xValues, \@yValues) or die "Invalid data";
      ($intercept, $slope) = $lineFit->coefficients();
      defined $intercept or die "Can't fit line if x values are all equal";
      $rSquared = $lineFit->rSquared();
      $meanSquaredError = $lineFit->meanSqError();
      $durbinWatson = $lineFit->durbinWatson();
      $sigma = $lineFit->sigma();
      ($tStatIntercept, $tStatSlope) = $lineFit->tStatistics();
      @predictedYs = $lineFit->predictedYs();
      @residuals = $lineFit->residuals();
      ($varianceIntercept, $varianceSlope) = $lineFit->varianceOfEstimates();

DESCRIPTION
    The Statistics::LineFit module does weighted or unweighted
    least-squares line fitting to two-dimensional data (y = a + b * x).
    (This is also called linear regression.)  In addition to the slope and
    y-intercept, the module can return the square of the correlation
    coefficient (R squared), the Durbin-Watson statistic, the mean squared
    error, sigma, the t statistics, the variance of the estimates of the
    slope and y-intercept, the predicted y values and the residuals of the
    y values.  (See the METHODS section for a description of these
    statistics.)

    The module accepts input data in separate x and y arrays or a single
    2-D array (an array of arrayrefs).  The optional weights are input in a
    separate array.  The module can optionally verify that the input data
    and weights are valid numbers.  If weights are input, the line fit
    minimizes the weighted sum of the squared errors and the following
    statistics are weighted: the correlation coefficient, the
    Durbin-Watson statistic, the mean squared error, sigma and the t
    statistics.

    The module is state-oriented and caches its results.  Once you call
    the setData() method, you can call the other methods in any order or
    call a method several times without invoking redundant calculations.
    After calling setData(), you can modify the input data or weights
    without affecting the module's results.

    The decision to use or not use weighting could be made using your a
    priori knowledge of the data or using supplemental data.  If the data
    is sparse or contains non-random noise, weighting can degrade the
    solution.  Weighting is a good option if some points are suspect or
    less relevant (e.g., older terms in a time series, points that are
    known to have more noise).
ALGORITHM
    The least-square line is the line that minimizes the sum of the
    squares of the y residuals:

      Minimize SUM((y[i] - (a + b * x[i])) ** 2)

    Setting the partial derivatives with respect to a and b to zero yields
    a solution that can be expressed in terms of the means, variances and
    covariances of x and y:

      b = SUM((x[i] - meanX) * (y[i] - meanY)) / SUM((x[i] - meanX) ** 2)

      a = meanY - b * meanX

    Note that a and b are undefined if all the x values are the same.

    If you use weights, each term in the above sums is multiplied by the
    value of the weight for that index.  The program normalizes the
    weights (after copying the input values) so that the sum of the
    weights equals the number of points.  This minimizes the differences
    between the weighted and unweighted equations.

    Statistics::LineFit uses equations that are mathematically equivalent
    to the above equations and computationally more efficient.  The module
    runs in O(N) (linear time).

LIMITATIONS
    The regression fails if the input x values are all equal or the only
    unequal x values have zero weights.  This is an inherent limit to
    fitting a line of the form y = a + b * x.  In this case, the module
    issues an error message and methods that return statistical values
    will return undefined values.  You can also use the return value of
    the regress() method to check the status of the regression.

    As the sum of the squared deviations of the x values approaches zero,
    the module's results become sensitive to the precision of floating
    point operations on the host system.

    If the x values are not all the same and the apparent "best fit" line
    is vertical, the module will fit a horizontal line.  For example, an
    input of (1, 1), (1, 7), (2, 3), (2, 5) returns a slope of zero, an
    intercept of 4 and an R squared of zero.  This is correct behavior
    because this line is the best least-squares fit to the data for the
    given parameterization (y = a + b * x).

    On a 32-bit system the results are accurate to about 11 significant
    digits, depending on the input data.  Many of the installation tests
    will fail on a system with word lengths of 16 bits or fewer.  (You
    might want to upgrade your old 80286 IBM PC.)

EXAMPLES
  Alternate calling sequence:
      use Statistics::LineFit;
      $lineFit = Statistics::LineFit->new();
      $lineFit->setData(\@x, \@y) or die "Invalid regression data\n";
      if (defined $lineFit->rSquared()
          and $lineFit->rSquared() > $threshold)
      {
          ($intercept, $slope) = $lineFit->coefficients();
          print "Slope: $slope  Y-intercept: $intercept\n";
      }

  Multiple calls with same object, validate input, suppress error messages:
      use Statistics::LineFit;
      $lineFit = Statistics::LineFit->new(1, 1);
      while (1) {
          @xy = read2Dxy();  # User-supplied subroutine
          $lineFit->setData(\@xy);
          ($intercept, $slope) = $lineFit->coefficients();
          if (defined $intercept) {
              print "Slope: $slope  Y-intercept: $intercept\n";
          }
      }
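
  Weighted fit (the weight values shown are illustrative only):
      use Statistics::LineFit;
      $lineFit = Statistics::LineFit->new();
      # Give the two most recent points twice the weight of the others;
      # only the relative sizes of the weights are significant.
      @weights = (1, 1, 1, 2, 2);
      $lineFit->setData(\@x, \@y, \@weights) or die "Invalid regression data";
      ($intercept, $slope) = $lineFit->coefficients();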

METHODS
    The module is state-oriented and caches its results.  Once you call
    the setData() method, you can call the other methods in any order or
    call a method several times without invoking redundant calculations.

    The regression fails if the x values are all the same.  In this case,
    the module issues an error message and methods that return statistical
    values will return undefined values.  You can also use the return
    value of the regress() method to check the status of the regression.

  new() - create a new Statistics::LineFit object
      $lineFit = Statistics::LineFit->new();
      $lineFit = Statistics::LineFit->new($validate);
      $lineFit = Statistics::LineFit->new($validate, $hush);

      $validate = 1 -> Verify input data is numeric (slower execution)
                  0 -> Don't verify input data (default, faster execution)
      $hush = 1 -> Suppress error messages
            = 0 -> Enable error messages (default)

  coefficients() - Return the slope and y intercept
      ($intercept, $slope) = $lineFit->coefficients();

    The returned list is undefined if the regression fails.

  durbinWatson() - Return the Durbin-Watson statistic
      $durbinWatson = $lineFit->durbinWatson();

    The Durbin-Watson test is a test for first-order autocorrelation in
    the residuals of a time series regression.  The Durbin-Watson
    statistic has a range of 0 to 4; a value of 2 indicates there is no
    autocorrelation.

    The return value is undefined if the regression fails.  If weights are
    input, the return value is the weighted Durbin-Watson statistic.

  meanSqError() - Return the mean squared error
      $meanSquaredError = $lineFit->meanSqError();

    The return value is undefined if the regression fails.  If weights are
    input, the return value is the weighted mean squared error.

  predictedYs() - Return the predicted y values
      @predictedYs = $lineFit->predictedYs();

    The returned list is undefined if the regression fails.

  regress() - Do the least squares line fit (if not already done)
      $lineFit->regress() or die "Regression failed";

    You don't need to call this method because it is invoked by the other
    methods as needed.  After you call setData(), you can call regress()
    at any time to get the status of the regression for the current data.

  residuals() - Return the input y values minus the predicted y values
      @residuals = $lineFit->residuals();

    The returned list is undefined if the regression fails.

  rSquared() - Return the square of the correlation coefficient
      $rSquared = $lineFit->rSquared();

    R squared, also called the square of the Pearson product-moment
    correlation coefficient, is a measure of goodness-of-fit.  It is the
    fraction of the variation in Y that can be attributed to the variation
    in X.  A perfect fit will have an R squared of 1; fitting a line to
    the vertices of a regular polygon will yield an R squared of zero.
    Graphical displays of data with an R squared of less than about 0.1 do
    not show a visible linear trend.

    The return value is undefined if the regression fails.  If weights are
    input, the return value is the weighted correlation coefficient.

  setData() - Initialize (x,y) values and optional weights
      $lineFit->setData(\@x, \@y) or die "Invalid regression data";
      $lineFit->setData(\@x, \@y, \@weights) or die "Invalid regression data";
      $lineFit->setData(\@xy) or die "Invalid regression data";
      $lineFit->setData(\@xy, \@weights) or die "Invalid regression data";

    @xy is an array of arrayrefs; x values are $xy[$i][0], y values are
    $xy[$i][1].  (The module does not access any indices greater than
    $xy[$i][1], so the arrayrefs can point to arrays that are longer than
    two elements.)  The method distinguishes the first two calling
    signatures from the last two by examining the first argument.

    The optional weights array must be the same length as the data
    array(s).  The weights must be non-negative numbers; at least two of
    the weights must be nonzero.  Only the relative size of the weights is
    significant: the program normalizes the weights (after copying the
    input values) so that the sum of the weights equals the number of
    points.
If you want to do multiple line fits using the same weights, the weights must be passed to each call to setData().

The method returns zero if the array lengths don't match, if there are fewer than two data points, if any weights are negative, or if fewer than two of the weights are nonzero. If the new() method was called with validate = 1, the method will also verify that the data and weights are valid numbers. Once you successfully call setData(), the next call to any method other than new() or setData() invokes the regression. You can modify the input data or weights after calling setData() without affecting the module's results.

sigma() - Return the standard error of the estimate

  $sigma = $lineFit->sigma();

Sigma is an estimate of the homoscedastic standard deviation of the error. Sigma is also known as the standard error of the estimate.

The return value is undefined if the regression fails. If weights are input, the return value is the weighted standard error.

tStatistics() - Return the t statistics

  ($tStatIntercept, $tStatSlope) = $lineFit->tStatistics();

The t statistic, also called the t ratio or Wald statistic, is used to accept or reject a hypothesis using a table of cutoff values computed from the t distribution. The t statistic suggests that the estimated value is reasonable when the t statistic is close to zero, too small when the t statistic is large and positive, and too large when the t statistic is large and negative.

The returned list is undefined if the regression fails. If weights are input, the returned values are the weighted t statistics.

varianceOfEstimates() - Return variances of estimates of intercept, slope

  ($varianceIntercept, $varianceSlope) = $lineFit->varianceOfEstimates();

Assuming the data are noisy or inaccurate, the intercept and slope returned by the coefficients() method are only estimates of the true intercept and slope. The varianceOfEstimates() method returns the variances of the estimates of the intercept and slope, respectively. See Numerical Recipes in C, section 15.2 (Fitting Data to a Straight Line), equation 15.2.9.

The returned list is undefined if the regression fails. If weights are input, the returned values are the weighted variances.

SEE ALSO

Mendenhall, W., and Sincich, T.L., 2003, A Second Course in Statistics: Regression Analysis, 6th ed., Prentice Hall.

Press, W. H., Flannery, B. P., Teukolsky, S. A., Vetterling, W. T., 1992, Numerical Recipes in C: The Art of Scientific Computing, 2nd ed., Cambridge University Press.

The man page for perl(1).

The CPAN modules Statistics::OLS, Statistics::GaussHelmert and Statistics::Regression.

Statistics::LineFit is simpler to use than Statistics::GaussHelmert or Statistics::Regression. Statistics::LineFit was inspired by and borrows some ideas from the venerable Statistics::OLS module. The significant differences between Statistics::LineFit and Statistics::OLS (version 0.07) are:

Statistics::LineFit is more robust. Statistics::OLS returns incorrect results for certain input datasets. Statistics::OLS does not deep copy its input arrays, which can lead to subtle bugs. The Statistics::OLS installation suite has only one test and does not verify that the regression returns correct results. In contrast, Statistics::LineFit has over 200 installation tests that use various datasets/calling sequences to verify the accuracy of the regression to within 1.0e-10.

Statistics::LineFit is faster.
For a sequence of calls to new(), setData(\@x, \@y) and regress(), Statistics::LineFit is faster than Statistics::OLS by factors of 2.0, 1.6 and 2.4 for array lengths of 5, 100 and 10000, respectively.

Statistics::LineFit can do weighted or unweighted regression. Statistics::OLS lacks this option.

Statistics::LineFit has a better interface. Once you call the Statistics::LineFit::setData() method, you can call the other methods in any order and call methods multiple times without invoking redundant calculations. Statistics::LineFit lets you enable or disable data verification or error messages.

Statistics::LineFit has better code and documentation. The code in Statistics::LineFit is more readable, more object-oriented and more compliant with Perl coding standards than the code in Statistics::OLS. The documentation for Statistics::LineFit is more detailed and complete.

AUTHOR

Richard Anderson, cpan(AT)richardanderson(DOT)org, http://www.richardanderson.org

LICENSE

This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. The full text of the license can be found in the LICENSE file included in the distribution and available in the CPAN listing for Statistics::LineFit (see www.cpan.org or search.cpan.org).

DISCLAIMER

To the maximum extent permitted by applicable law, the author of this module disclaims all warranties, either express or implied, including but not limited to implied warranties of merchantability and fitness for a particular purpose, with regard to the software and the accompanying documentation.

Statistics-LineFit/scripts/0040700000175500010010000000000010066453147015503 5ustar CustomerNoneStatistics-LineFit/t/0040700000175500010010000000000010115635367014260 5ustar CustomerNoneStatistics-LineFit/t/01_load.t0100700000175500010010000000032207756004423015661 0ustar CustomerNone# -*- perl -*- # Check module loading use strict; use Test::More tests => 2; BEGIN { use_ok( 'Statistics::LineFit' ); } my $lineFit = Statistics::LineFit->new(); isa_ok ($lineFit, 'Statistics::LineFit'); Statistics-LineFit/t/02_R5ptsU1-D.t0100700000175500010010000000571610115505771016356 0ustar CustomerNone# -*- perl -*- # Test unweighted line fit using five points, 1-D arrays # Test multiple calls using the same object and different data use strict; use Test::More tests => 26; my $epsilon = 1.0e-12; my @x = (1, 2, 3, 4, 5); my @y = (1, 2, 3, 4, 5); eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(); is($lineFit->setData(\@x, \@y), 1, 'setData(\@x, \@y)'); my @coefficients = $lineFit->coefficients(); is_deeply(\@coefficients, [ (0, 1) ], 'coefficients()'); is($lineFit->durbinWatson(), 0, 'durbinWatson()'); is($lineFit->meanSqError(), 0, 'meanSqError()'); my @predictedY = $lineFit->predictedYs(); is_deeply(\@predictedY, [ (1, 2, 3, 4, 5) ], 'predictedYs()'); my @residuals = $lineFit->residuals(); is_deeply(\@residuals, [ (0, 0, 0, 0, 0) ], 'residuals()'); is($lineFit->rSquared(), 1, 'rSquared()'); is($lineFit->sigma(), 0, 'sigma()'); my @tStatistics = $lineFit->tStatistics(); is_deeply(\@tStatistics, [ (0, 0) ], 'tStatistics()'); my @varianceOfEstimates = $lineFit->varianceOfEstimates(); is(@varianceOfEstimates, 0, 'varianceOfEstimates()'); my $sumSqErrors = 0; foreach my $residual (@residuals) { $sumSqErrors += $residual ** 2 } cmp_ok(abs($sumSqErrors - $lineFit->sumSqErrors()), "<", $epsilon, 'sumSqErrors()'); @x = (-1, -2, 3, 4); @y = (-1.02, 1.9, -3.2, 5); is($lineFit->setData(\@x, \@y), 1, 'setData(\@x, \@y)'); @coefficients = 
$lineFit->coefficients(); cmp_ok(abs($coefficients[0] - 0.48), "<", $epsilon, 'coefficients[0]'); cmp_ok(abs($coefficients[1] - 0.19), "<", $epsilon, 'coefficients[1]'); cmp_ok(abs($lineFit->durbinWatson() - 2.9721742266432), "<", $epsilon, 'durbinWatson()'); cmp_ok(abs($lineFit->meanSqError() - 9.28905), "<", $epsilon, 'meanSqError()'); @predictedY = $lineFit->predictedYs(); is_deeply(\@predictedY, [ (0.29, 0.1, 1.05, 1.24) ], 'predictedYs()'); @residuals = $lineFit->residuals(); is_deeply(\@residuals, [ (-1.31, 1.8, -4.25, 3.76) ], 'residuals()'); cmp_ok(abs($lineFit->rSquared() - 0.0246385333431334), "<", $epsilon, 'rSquared()'); cmp_ok(abs($lineFit->sigma() - 4.31023201231674), "<", $epsilon, 'sigma()'); @tStatistics = $lineFit->tStatistics(); cmp_ok(abs($tStatistics[0] - 0.20734646307841), "<", $epsilon, 'tStatistics[0]'); cmp_ok(abs($tStatistics[1] - 0.224770663113769), "<", $epsilon, 'tStatistics[1]'); @varianceOfEstimates = $lineFit->varianceOfEstimates(); cmp_ok(abs($varianceOfEstimates[0] - 1.17115479964839), "<", $epsilon, 'varianceOfEstimates[0]'); cmp_ok(abs($varianceOfEstimates[1] - 0.345662317339679), "<", $epsilon, 'varianceOfEstimates[1]'); $sumSqErrors = 0; foreach my $residual (@residuals) { $sumSqErrors += $residual ** 2 } cmp_ok(abs($sumSqErrors - $lineFit->sumSqErrors()), "<", $epsilon, 'sumSqErrors()'); }; is($@, '', 'eval error trap'); Statistics-LineFit/t/03_R5ptsW1-D.t0100700000175500010010000000643310115634746016363 0ustar CustomerNone# -*- perl -*- # Test weighted line fit using five points, 1-D arrays # Test weighted line fit followed by unweighted line fit using the same object use strict; use Test::More tests => 29; my $epsilon = 1.0e-12; my @x = (1, 2, 3, 4, 5); my @y = (1, 2, 100, 4, 5); my @weights = (1, 1, 0, 1, 1); eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(); is($lineFit->setData(\@x, \@y, \@weights), 1, 'setData(\@x, \@y, \@weights)'); my @coefficients = $lineFit->coefficients(); is_deeply(\@coefficients, [ (0, 1) ], 'coefficients()'); is($lineFit->durbinWatson(), 0, 'durbinWatson()'); is($lineFit->meanSqError(), 0, 'meanSqError()'); my @predictedY = $lineFit->predictedYs(); is_deeply(\@predictedY, [ (1, 2, 3, 4, 5) ], 'predictedYs()'); my @residuals = $lineFit->residuals(); is_deeply(\@residuals, [ (0, 0, 97, 0, 0) ], 'residuals()'); is($lineFit->rSquared(), 1, 'rSquared()'); is($lineFit->sigma(), 0, 'sigma()'); my @tStatistics = $lineFit->tStatistics(); is_deeply(\@tStatistics, [ (0, 0) ], 'tStatistics()'); my @varianceOfEstimates = $lineFit->varianceOfEstimates(); is(@varianceOfEstimates, 0, 'varianceOfEstimates()'); my $sumSqErrors = 0; for (my $i = 0; $i < @residuals; ++$i) { $sumSqErrors += $residuals[$i] ** 2 * $weights[$i]; } cmp_ok(abs($sumSqErrors - $lineFit->sumSqErrors()), "<", $epsilon, 'sumSqErrors()'); @x = (1, 2, 3, 4); @y = (1.2, 1.9, 3.1, 4.2); is($lineFit->setData(\@x, \@y), 1, 'setData(\@x, \@y)'); @coefficients = $lineFit->coefficients(); cmp_ok(abs($coefficients[0] - 0.0499999999999972), "<", $epsilon, 'coefficients[0]'); cmp_ok(abs($coefficients[1] - 1.02), "<", $epsilon, 'coefficients[1]'); cmp_ok(abs($lineFit->durbinWatson() - 2.43448275862085), "<", $epsilon, 'durbinWatson()'); cmp_ok(abs($lineFit->meanSqError() - 0.0144999999999991), "<", $epsilon, 'meanSqError()'); @predictedY = $lineFit->predictedYs(); is_deeply(\@predictedY, [ (1.07, 2.09, 3.11, 4.13) ], 'predictedYs()'); @residuals = $lineFit->residuals(); my @results = (0.130000000000002, -0.189999999999999, -0.00999999999999979, 
0.0699999999999994); for (my $i = 0; $i < @residuals; ++$i) { cmp_ok(abs($residuals[$i] - $results[$i]), "<", $epsilon, 'residuals()'); } cmp_ok(abs($lineFit->rSquared() - 0.988973384030419), "<", $epsilon, 'rSquared()'); cmp_ok(abs($lineFit->sigma() - 0.170293863659259), "<", $epsilon, 'sigma()'); @tStatistics = $lineFit->tStatistics(); cmp_ok(abs($tStatistics[0] - 0.239731650742686), "<", $epsilon, 'tStatistics[0]'); cmp_ok(abs($tStatistics[1] - 13.3932561516921), "<", $epsilon, 'tStatistics[1]'); @varianceOfEstimates = $lineFit->varianceOfEstimates(); cmp_ok(abs($varianceOfEstimates[0] - 0.0193944771559506), "<", $epsilon, 'varianceOfEstimates[0]'); cmp_ok(abs($varianceOfEstimates[1] - 0.00213610609637551), "<", $epsilon, 'varianceOfEstimates[1]'); $sumSqErrors = 0; foreach my $residual (@residuals) { $sumSqErrors += $residual ** 2 } cmp_ok(abs($sumSqErrors - $lineFit->sumSqErrors()), "<", $epsilon, 'sumSqErrors()'); }; is($@, '', 'eval error trap'); Statistics-LineFit/t/04_4ptsU1-D.t0100700000175500010010000000412310115634764016231 0ustar CustomerNone# -*- perl -*- # Test unweighted line fit using four points, 1-D arrays use strict; use Test::More tests => 18; my $epsilon = 1.0e-12; my @x = (1, 2, 3, 4); my @y = (1.2, 1.9, 3.1, 4.2,); eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(); is($lineFit->setData(\@x, \@y), 1, 'setData(\@x, \@y)'); my @coefficients = $lineFit->coefficients(); cmp_ok(abs($coefficients[0] - 0.0499999999999972), "<", $epsilon, 'coefficients[0]'); cmp_ok(abs($coefficients[1] - 1.02), "<", $epsilon, 'coefficients[1]'); cmp_ok(abs($lineFit->durbinWatson() - 2.43448275862085), "<", $epsilon, 'durbinWatson()'); cmp_ok(abs($lineFit->meanSqError() - 0.0144999999999991), "<", $epsilon, 'meanSqError()'); my @predictedY = $lineFit->predictedYs(); is_deeply(\@predictedY, [ (1.07, 2.09, 3.11, 4.13) ], 'predictedYs()'); my @residuals = $lineFit->residuals(); my @results = (0.130000000000002, -0.189999999999999, -0.00999999999999979, 0.0699999999999994); for (my $i = 0; $i < @residuals; ++$i) { cmp_ok(abs($residuals[$i] - $results[$i]), "<", $epsilon, 'residuals()'); } cmp_ok(abs($lineFit->rSquared() - 0.988973384030419), "<", $epsilon, 'rSquared()'); cmp_ok(abs($lineFit->sigma() - 0.170293863659259), "<", $epsilon, 'sigma()'); my @tStatistics = $lineFit->tStatistics(); cmp_ok(abs($tStatistics[0] - 0.239731650742686), "<", $epsilon, 'tStatistics[0]'); cmp_ok(abs($tStatistics[1] - 13.3932561516921), "<", $epsilon, 'tStatistics[1]'); my @varianceOfEstimates = $lineFit->varianceOfEstimates(); cmp_ok(abs($varianceOfEstimates[0] - 0.0193944771559506), "<", $epsilon, 'varianceOfEstimates[0]'); cmp_ok(abs($varianceOfEstimates[1] - 0.00213610609637551), "<", $epsilon, 'varianceOfEstimates[1]'); my $sumSqErrors = 0; foreach my $residual (@residuals) { $sumSqErrors += $residual ** 2 } cmp_ok(abs($sumSqErrors - $lineFit->sumSqErrors()), "<", $epsilon, 'sumSqErrors()'); }; is($@, '', 'eval error trap'); Statistics-LineFit/t/05_4ptsU2-D.t0100700000175500010010000000422410115635003016220 0ustar CustomerNone# -*- perl -*- # Test unweighted line fit using four points, 2-D array use strict; use Test::More tests => 18; my $epsilon = 1.0e-12; my @x = (1, 2, 3, 4); my @y = (1.2, 1.9, 3.1, 4.2,); my @xy; for (my $i = 0; $i < @x; ++$i) { $xy[$i] = [ ($x[$i], $y[$i]) ] } eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(); is($lineFit->setData(\@xy), 1, 'setData(\@xy)'); my @coefficients = $lineFit->coefficients(); cmp_ok(abs($coefficients[0] - 
0.0499999999999972), "<", $epsilon, 'coefficients[0]'); cmp_ok(abs($coefficients[1] - 1.02), "<", $epsilon, 'coefficients[1]'); cmp_ok(abs($lineFit->durbinWatson() - 2.43448275862085), "<", $epsilon, 'durbinWatson()'); cmp_ok(abs($lineFit->meanSqError() - 0.0144999999999991), "<", $epsilon, 'meanSqError()'); my @predictedY = $lineFit->predictedYs(); is_deeply(\@predictedY, [ (1.07, 2.09, 3.11, 4.13) ], 'predictedYs()'); my @residuals = $lineFit->residuals(); my @results = (0.130000000000002, -0.189999999999999, -0.00999999999999979, 0.0699999999999994); for (my $i = 0; $i < @residuals; ++$i) { cmp_ok(abs($residuals[$i] - $results[$i]), "<", $epsilon, 'residuals()'); } cmp_ok(abs($lineFit->rSquared() - 0.988973384030419), "<", $epsilon, 'rSquared()'); cmp_ok(abs($lineFit->sigma() - 0.170293863659259), "<", $epsilon, 'sigma()'); my @tStatistics = $lineFit->tStatistics(); cmp_ok(abs($tStatistics[0] - 0.239731650742686), "<", $epsilon, 'tStatistics[0]'); cmp_ok(abs($tStatistics[1] - 13.3932561516921), "<", $epsilon, 'tStatistics[1]'); my @varianceOfEstimates = $lineFit->varianceOfEstimates(); cmp_ok(abs($varianceOfEstimates[0] - 0.0193944771559506), "<", $epsilon, 'varianceOfEstimates[0]'); cmp_ok(abs($varianceOfEstimates[1] - 0.00213610609637551), "<", $epsilon, 'varianceOfEstimates[1]'); my $sumSqErrors = 0; foreach my $residual (@residuals) { $sumSqErrors += $residual ** 2 } cmp_ok(abs($sumSqErrors - $lineFit->sumSqErrors()), "<", $epsilon, 'sumSqErrors()'); }; is($@, '', 'eval error trap'); Statistics-LineFit/t/06_4ptsW1-D.t0100700000175500010010000000467310115635020016231 0ustar CustomerNone# -*- perl -*- # Test weighted line fit using four points, 1-D arrays # Test alternate calling sequence (compared to test 04_4ptsU1-D) use strict; use Test::More tests => 21; my $epsilon = 1.0e-12; my @x = (1, 2, 3, 4); my @y = (1.2, 1.9, 3.1, 4.2,); my @weights = (0.1, 0.3, 0.2, 0.4); eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(1); is($lineFit->setData(\@x, \@y, \@weights), 1, 'setData(\@x, \@y, \@weights)'); cmp_ok(abs($lineFit->rSquared() - 0.991165853485171), "<", $epsilon, 'rSquared()'); cmp_ok(abs($lineFit->durbinWatson() - 3.48475090763877), "<", $epsilon, 'durbinWatson()'); my @tStatistics = $lineFit->tStatistics(); cmp_ok(abs($tStatistics[0] - -0.544629105447965), "<", $epsilon, 'tStatistics[0]'); cmp_ok(abs($tStatistics[1] - 14.9797948208089), "<", $epsilon, 'tStatistics[1]'); my @varianceOfEstimates = $lineFit->varianceOfEstimates(); cmp_ok(abs($varianceOfEstimates[0] - 3.85435204917384e-05), "<", $epsilon, 'varianceOfEstimates[0]'); cmp_ok(abs($varianceOfEstimates[1] - 5.24491637451941e-06), "<", $epsilon, 'varianceOfEstimates[1]'); my @predictedY = $lineFit->predictedYs(); my @results = (0.95229357798165, 2.02477064220183, 3.09724770642202, 4.1697247706422); for (my $i = 0; $i < @predictedY; ++$i) { cmp_ok(abs($predictedY[$i] - $results[$i]), "<", $epsilon, 'predictedY()'); } my @residuals = $lineFit->residuals(); @results = (0.24770642201835, -0.124770642201834, 0.00275229357798157, 0.0302752293577973); for (my $i = 0; $i < @residuals; ++$i) { cmp_ok(abs($residuals[$i] - $results[$i]), "<", $epsilon, 'residuals()'); } cmp_ok(abs($lineFit->meanSqError() - 0.011174311926607), "<", $epsilon, 'meanSqError()'); cmp_ok(abs($lineFit->sigma() - 0.149494561283058), "<", $epsilon, 'sigma()'); my @coefficients = $lineFit->coefficients(); cmp_ok(abs($coefficients[0] - -0.120183486238534), "<", $epsilon, 'coefficients[0]'); cmp_ok(abs($coefficients[1] - 1.07247706422018), 
"<", $epsilon, 'coefficients[1]'); my $sumSqErrors = 0; for (my $i = 0; $i < @residuals; ++$i) { $sumSqErrors += $residuals[$i] ** 2 * $weights[$i]; } cmp_ok(abs($sumSqErrors - $lineFit->sumSqErrors()), "<", $epsilon, 'sumSqErrors()'); }; is($@, '', 'eval error trap'); Statistics-LineFit/t/07_4ptsW2-D.t0100700000175500010010000000476210115635075016244 0ustar CustomerNone# -*- perl -*- # Test weighted line fit using four points, 2-D array # Test alternate calling sequence (compared to test 05_4ptsU2-D) use strict; use Test::More tests => 21; my $epsilon = 1.0e-12; my @x = (1, 2, 3, 4); my @y = (1.2, 1.9, 3.1, 4.2,); my @xy; for (my $i = 0; $i < @x; ++$i) { $xy[$i] = [ ($x[$i], $y[$i]) ] } my @weights = (0.1, 0.3, 0.2, 0.4); eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(); is($lineFit->setData(\@xy, \@weights), 1, 'setData(\@xy, \@weights)'); cmp_ok(abs($lineFit->sigma() - 0.149494561283058), "<", $epsilon, 'sigma()'); my @coefficients = $lineFit->coefficients(); cmp_ok(abs($coefficients[0] - -0.120183486238534), "<", $epsilon, 'coefficients[0]'); cmp_ok(abs($coefficients[1] - 1.07247706422018), "<", $epsilon, 'coefficients[1]'); my @residuals = $lineFit->residuals(); my @results = (0.24770642201835, -0.124770642201834, 0.00275229357798157, 0.0302752293577973); for (my $i = 0; $i < @residuals; ++$i) { cmp_ok(abs($residuals[$i] - $results[$i]), "<", $epsilon, 'residuals()'); } cmp_ok(abs($lineFit->rSquared() - 0.991165853485171), "<", $epsilon, 'rSquared()'); my @tStatistics = $lineFit->tStatistics(); cmp_ok(abs($tStatistics[0] - -0.544629105447965), "<", $epsilon, 'tStatistics[0]'); cmp_ok(abs($tStatistics[1] - 14.9797948208089), "<", $epsilon, 'tStatistics[0]'); my @varianceOfEstimates = $lineFit->varianceOfEstimates(); cmp_ok(abs($varianceOfEstimates[0] - 3.85435204917384e-05), "<", $epsilon, 'varianceOfEstimates[0]'); cmp_ok(abs($varianceOfEstimates[1] - 5.24491637451941e-06), "<", $epsilon, 'varianceOfEstimates[0]'); my $sumSqErrors = 0; for (my $i = 0; $i < @residuals; ++$i) { $sumSqErrors += $residuals[$i] ** 2 * $weights[$i]; } cmp_ok(abs($sumSqErrors - $lineFit->sumSqErrors()), "<", $epsilon, 'sumSqErrors()'); cmp_ok(abs($lineFit->meanSqError() - 0.011174311926607), "<", $epsilon, 'meanSqError()'); cmp_ok(abs($lineFit->durbinWatson() - 3.48475090763877), "<", $epsilon, 'durbinWatson()'); my @predictedY = $lineFit->predictedYs(); @results = (0.95229357798165, 2.02477064220183, 3.09724770642202, 4.1697247706422); for (my $i = 0; $i < @predictedY; ++$i) { cmp_ok(abs($predictedY[$i] - $results[$i]), "<", $epsilon, 'predictedY()'); } }; is($@, '', 'eval error trap'); Statistics-LineFit/t/08_3ptsU1-D.t0100700000175500010010000000405110115635112016220 0ustar CustomerNone# -*- perl -*- # Test unweighted line fit, three points (an equilateral triangle), 1-D arrays use strict; use Test::More tests => 19; my $epsilon = 1.0e-12; my @x = (1, 1.5, 2); my @y = (1, 1.866025403784438, 1); eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(); is($lineFit->setData(\@x, \@y), 1, 'setData(\@x, \@y)'); my @coefficients = $lineFit->coefficients(); cmp_ok(abs($coefficients[0] - 1.28867513459481), "<", $epsilon, 'coefficients[0]'); is($coefficients[1], 0, 'coefficients[1]'); is($lineFit->durbinWatson(), 3, 'durbinWatson()'); cmp_ok(abs($lineFit->meanSqError() - 0.166666666666666), "<", $epsilon, 'meanSqError()'); my @predictedY = $lineFit->predictedYs(); my @results = (1.28867513459481, 1.28867513459481, 1.28867513459481); for (my $i = 0; $i < @predictedY; ++$i) { 
cmp_ok(abs($predictedY[$i] - $results[$i]), "<", $epsilon, 'predictedY()'); } my @residuals = $lineFit->residuals(); @results = (-0.288675134594813, 0.577350269189625, -0.288675134594813); for (my $i = 0; $i < @residuals; ++$i) { cmp_ok(abs($residuals[$i] - $results[$i]), "<", $epsilon, 'residuals()'); } is($lineFit->rSquared(), 0, 'rSquared()'); cmp_ok(abs($lineFit->sigma() - 0.707106781186547), "<", $epsilon, 'sigma()'); my @tStatistics = $lineFit->tStatistics(); cmp_ok(abs($tStatistics[0] - 0.828962859079729), "<", $epsilon, 'tStatistics[0]'); is($tStatistics[1], 0, 'tStatistics[1]'); my @varianceOfEstimates = $lineFit->varianceOfEstimates(); cmp_ok(abs($varianceOfEstimates[0] - 0.412037037037036), "<", $epsilon, 'varianceOfEstimates[0]'); cmp_ok(abs($varianceOfEstimates[1] - 0.166666666666666), "<", $epsilon, 'varianceOfEstimates[1]'); my $sumSqErrors = 0; foreach my $residual (@residuals) { $sumSqErrors += $residual ** 2 } cmp_ok(abs($sumSqErrors - $lineFit->sumSqErrors()), "<", $epsilon, 'sumSqErrors()'); }; is($@, '', 'eval error trap'); Statistics-LineFit/t/09_2ptsU1-D.t0100700000175500010010000000207310115635123016224 0ustar CustomerNone# -*- perl -*- # Test unweighted line fit using two points, 1-D arrays use strict; use Test::More tests => 11; my @x = (1000, 2000); my @y = (-1000, -2000); eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(); is($lineFit->setData(\@x, \@y), 1, 'setData(\@x, \@y)'); my @coefficients = $lineFit->coefficients(); is_deeply(\@coefficients, [ (0, -1) ], 'coefficients()'); is($lineFit->durbinWatson(), 0, 'durbinWatson()'); is($lineFit->meanSqError(), 0, 'meanSqError()'); my @predictedY = $lineFit->predictedYs(); is_deeply(\@predictedY, [ (-1000, -2000) ], 'predictedYs()'); my @residuals = $lineFit->residuals(); is_deeply(\@residuals, [ (0, 0) ], 'residuals()'); is($lineFit->rSquared(), 1, 'rSquared()'); is($lineFit->sigma(), 0, 'sigma()'); my @tStatistics = $lineFit->tStatistics(); is_deeply(\@tStatistics, [ (0, 0) ], 'tStatistics()'); my @varianceOfEstimates = $lineFit->varianceOfEstimates(); is(@varianceOfEstimates, 0, 'varianceOfEstimates()'); }; is($@, '', 'eval error trap'); Statistics-LineFit/t/10_bad_data.t0100700000175500010010000000466410053250555016467 0ustar CustomerNone# -*- perl -*- # Test calling setData() with invalid data use strict; use Test::More tests => 18; eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(0, 1); # Only one point my @x = (0); my @y = (-1); my @xy = ( [ (0, 1) ] ); is($lineFit->setData(\@x, \@y), 0, 'setData(\@x, \@y)'); is($lineFit->setData(\@xy), 0, 'setData(\@xy)'); # Data array lengths not equal @x = (0, 1, 2); @y = (-1, 0, 2, 3); is($lineFit->setData(\@x, \@y), 0, 'setData(\@x, \@y)'); my ($intercept, $slope) = $lineFit->coefficients(); ok(! defined $intercept, 'coefficients[0]'); ok(! 
defined $slope, 'coefficients[1]'); # Weights array length is not equal to length of data arrays @x = (0, 1, 2); @y = (-1, 0, 2); @xy = ( [ (0, 1) ], [ (0, 1) ], [ (0, 1) ] ); my @weights = (1, 2); is($lineFit->setData(\@x, \@y, \@weights), 0, 'setData(\@x, \@y, \@weights)'); is($lineFit->setData(\@xy, \@weights), 0, 'setData(\@xy, \@weights)'); # Negative weights not allowed @weights = (-1, 2, 3); @x = (0, 1, 2); @y = (-1, 0, 2); @xy = ( [ (0, 1) ], [ (0, 1) ], [ (0, 1) ] ); is($lineFit->setData(\@x, \@y, \@weights), 0, 'setData(\@x, \@y, \@weights)'); is($lineFit->setData(\@xy, \@weights), 0, 'setData(\@xy, \@weights)'); # Weights must contain at least two nonzero values @weights = (1, 0, 0); is($lineFit->setData(\@x, \@y, \@weights), 0, 'setData(\@x, \@y, \@weights)'); is($lineFit->setData(\@xy, \@weights), 0, 'setData(\@xy, \@weights)'); # Data arrays contain non-numeric data (validate = 1) $lineFit = Statistics::LineFit->new(1, 1); @x = (0, 1, 2); @y = (-1, 0, '1.0 e+3'); is($lineFit->setData(\@x, \@y), 0, 'setData(\@x, \@y)'); @x = ('- 1.0', 0, 1); @y = (-1, 0, 3); is($lineFit->setData(\@x, \@y), 0, 'setData(\@x, \@y)'); @x = (undef, 0, 1); is($lineFit->setData(\@x, \@y), 0, 'setData(\@x, \@y)'); @x = (0, 1, 2); @y = (-1, 0, undef); is($lineFit->setData(\@x, \@y), 0, 'setData(\@x, \@y)'); # Weight array contains non-numeric data (validate = 1) @weights = (1, '2.0,', 3); @x = (0, 1, 2); @y = (-1, 0, 4); is($lineFit->setData(\@x, \@y, \@weights), 0, 'setData(\@x, \@y, \@weights)'); @weights = (1, 2, undef); is($lineFit->setData(\@x, \@y, \@weights), 0, 'setData(\@x, \@y, \@weights)'); }; is($@, '', 'eval error trap'); Statistics-LineFit/t/11_no_data.t0100700000175500010010000000210710115635146016346 0ustar CustomerNone# -*- perl -*- # Verify that methods return undefined values if setData() was not called use strict; use Test::More tests => 13; eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(0, 1); my ($intercept, $slope) = $lineFit->coefficients(); ok(! defined $intercept, 'coefficients[0]'); ok(! defined $slope, 'coefficients[1]'); ok(! defined $lineFit->durbinWatson(), 'durbinWatson()'); ok(! defined $lineFit->meanSqError(), 'meanSqError()'); ok(! defined $lineFit->predictedYs(), 'predictedYs()'); ok(! defined $lineFit->residuals(), 'residuals()'); ok(! defined $lineFit->rSquared(), 'rSquared()'); ok(! defined $lineFit->sigma(), 'sigma()'); my ($tStatIntercept, $tStatSlope) = $lineFit->tStatistics(); ok(! defined $tStatIntercept, 'tStatIntercept'); ok(! defined $tStatSlope, 'tStatSlope'); my ($varianceIntercept, $varianceSlope) = $lineFit->varianceOfEstimates(); ok(! 
defined $varianceSlope, 'varianceSlope'); }; is($@, '', 'eval error trap'); Statistics-LineFit/t/12_idempotent.t0100700000175500010010000000267110115635161017115 0ustar CustomerNone# -*- perl -*- # Verify that the methods return the same results on repeated calls use strict; use Test::More tests => 11; my @x = (1, 2, 3, 4); my @y = (1.2, 2.9, 19.1, 15.2,); eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(); is($lineFit->setData(\@x, \@y), 1, 'setData(\@x, \@y)'); my @coeff1 = $lineFit->coefficients(); my @coeff2 = $lineFit->coefficients(); is_deeply(\@coeff1, \@coeff2, 'coefficients()'); is($lineFit->durbinWatson(), $lineFit->durbinWatson(), 'durbinWatson()'); is($lineFit->meanSqError(), $lineFit->meanSqError(), 'meanSqError()'); my @predictedY1 = $lineFit->predictedYs(); my @predictedY2 = $lineFit->predictedYs(); is_deeply(\@predictedY1, \@predictedY2, 'predictedYs()'); my @residuals1 = $lineFit->residuals(); my @residuals2 = $lineFit->residuals(); is_deeply(\@residuals1, \@residuals2, 'residuals()'); is($lineFit->rSquared(), $lineFit->rSquared(), 'rSquared()'); is($lineFit->sigma(), $lineFit->sigma(), 'sigma()'); my @tStatistics1 = $lineFit->tStatistics(); my @tStatistics2 = $lineFit->tStatistics(); is_deeply(\@tStatistics1, \@tStatistics2, 'tStatistics()'); my @varianceOfEstimates1 = $lineFit->varianceOfEstimates(); my @varianceOfEstimates2 = $lineFit->varianceOfEstimates(); is_deeply(\@varianceOfEstimates1, \@varianceOfEstimates2, 'varianceOfEstimates()'); }; is($@, '', 'eval error trap'); Statistics-LineFit/t/13_long_array.t0100700000175500010010000000303510115635367017106 0ustar CustomerNone# -*- perl -*- # Test unweighted line fit using 100,000 points, 1-D arrays use strict; use Test::More tests => 12; my $epsilon = 1.0e-10; my (@x, @y); for (my $i = 0; $i < 100000; ++$i) { $x[$i] = $i; $y[$i] = sqrt($i); } eval { use Statistics::LineFit; my $lineFit = Statistics::LineFit->new(); is($lineFit->setData(\@x, \@y), 1, 'setData(\@x, \@y)'); my @coefficients = $lineFit->coefficients(); cmp_ok(abs($coefficients[0] - 84.3254986214062), "<", $epsilon, 'coefficients[0]'); cmp_ok(abs($coefficients[1] - 0.00252985387534101), "<", $epsilon, 'coefficients[1]'); cmp_ok(abs($lineFit->durbinWatson() - 1.31194787800295e-07), "<", $epsilon, 'durbinWatson()'); cmp_ok(abs($lineFit->meanSqError() - 222.255904069674), "<", $epsilon, 'meanSqError()'); cmp_ok(abs($lineFit->rSquared() - 0.95999514370284), "<", $epsilon, 'rSquared()'); cmp_ok(abs($lineFit->sigma() - 14.9083986154335), "<", $epsilon, 'sigma()'); my @tStatistics = $lineFit->tStatistics(); cmp_ok(abs($tStatistics[0] - 894.336968406119), "<", $epsilon, 'tStatistics[0]'); cmp_ok(abs($tStatistics[1] - 1549.07989604865), "<", $epsilon, 'tStatistics[1]'); my @varianceOfEstimates = $lineFit->varianceOfEstimates(); cmp_ok(abs($varianceOfEstimates[0] - 8.96982691422364e-08), "<", $epsilon, 'varianceOfEstimates[0]'); cmp_ok(abs($varianceOfEstimates[1] - 0.0), "<", $epsilon, 'varianceOfEstimates[1]'); }; is($@, '', 'eval error trap'); Statistics-LineFit/t/14_scale_weights.t0100700000175500010010000000744710115635310017572 0ustar CustomerNone# -*- perl -*- # Test that multiplying the weights by a constant does not change results use strict; use Test::More tests => 25; my $epsilon = 1.0e-10; my (@x, @y, @weights); my $n = 100; for (my $i = 0; $i < $n; ++$i) { $x[$i] = $i + 1; $y[$i] = $i ** 0.75; if ($i % 3 == 0) { $weights[$i] = $i; } elsif ($i % 2 == 0) { $weights[$i] = 50; } else { $weights[$i] = $n + 1 - $i; } } eval { use 
Statistics::LineFit; my $lineFit = Statistics::LineFit->new(); # Test absolute value of results is($lineFit->setData(\@x, \@y, \@weights), 1, 'setData1(\@x, \@y, \@weights)'); my $rSquared1 = $lineFit->rSquared(); cmp_ok(abs($rSquared1 - 0.991788159320494), "<", $epsilon, 'rSquared1()'); my $durbinWatson1 = $lineFit->durbinWatson(); cmp_ok(abs($durbinWatson1 - 0.0192675216315352), "<", $epsilon, 'durbinWatson1()'); my @tStatistics1 = $lineFit->tStatistics(); cmp_ok(abs($tStatistics1[0] - 18.7895097501605), "<", $epsilon, 'tStatistics1[0]'); cmp_ok(abs($tStatistics1[1] - 108.793322452779), "<", $epsilon, 'tStatistics1[1]'); my @varianceOfEstimates1 = $lineFit->varianceOfEstimates(); cmp_ok(abs($varianceOfEstimates1[0] - 0.000758134044735196), "<", $epsilon, 'varianceOfEstimates1[0]'); cmp_ok(abs($varianceOfEstimates1[1] - 1.78672521866159e-07), "<", $epsilon, 'varianceOfEstimates1[1]'); my $meanSqError1 = $lineFit->meanSqError(); cmp_ok(abs($meanSqError1 - 0.603149693112746), "<", $epsilon, 'meanSqError1()'); my $sigma1 = $lineFit->sigma(); cmp_ok(abs($sigma1 - 0.784511867675187), "<", $epsilon, 'sigma1()'); my @coefficients1 = $lineFit->coefficients(); cmp_ok(abs($coefficients1[0] - 2.9923929074799), "<", $epsilon, 'coefficients1[0]'); cmp_ok(abs($coefficients1[1] - 0.295653654441678), "<", $epsilon, 'coefficients1[1]'); my $sumSqErrors1 = 0; my @residuals = $lineFit->residuals(); for (my $i = 0; $i < @residuals; ++$i) { $sumSqErrors1 += $residuals[$i] ** 2 * $weights[$i]; } cmp_ok(abs($sumSqErrors1 - $lineFit->sumSqErrors()), "<", $epsilon, 'sumSqErrors1()'); # Rescale weights and verify the results are the same for (my $i = 0; $i < $n; ++$i) { $weights[$i] *= 1000 } is($lineFit->setData(\@x, \@y, \@weights), 1, 'setData2(\@x, \@y, \@weights)'); cmp_ok(abs($lineFit->rSquared() - $rSquared1), "<", $epsilon, 'rSquared2()'); cmp_ok(abs($lineFit->durbinWatson() - $durbinWatson1), "<", $epsilon, 'durbinWatson2()'); my @tStatistics2 = $lineFit->tStatistics(); cmp_ok(abs($tStatistics2[0] - $tStatistics1[0]), "<", $epsilon, 'tStatistics2[0]'); cmp_ok(abs($tStatistics2[1] - $tStatistics1[1]), "<", $epsilon, 'tStatistics2[1]'); my @varianceOfEstimates2 = $lineFit->varianceOfEstimates(); cmp_ok(abs($varianceOfEstimates2[0] - $varianceOfEstimates1[0]), "<", $epsilon, 'varianceOfEstimates2[0]'); cmp_ok(abs($varianceOfEstimates2[1] - $varianceOfEstimates1[1]), "<", $epsilon, 'varianceOfEstimates2[1]'); cmp_ok(abs($lineFit->meanSqError() - $meanSqError1), "<", $epsilon, 'meanSqError2()'); cmp_ok(abs($lineFit->sigma() - $sigma1), "<", $epsilon, 'sigma2()'); my @coefficients2 = $lineFit->coefficients(); cmp_ok(abs($coefficients2[0] - $coefficients1[0]), "<", $epsilon, 'coefficients2[0]'); cmp_ok(abs($coefficients2[1] - $coefficients1[1]), "<", $epsilon, 'coefficients2[1]'); my $sumSqErrors2 = 0; @residuals = $lineFit->residuals(); for (my $i = 0; $i < @residuals; ++$i) { $sumSqErrors2 += $residuals[$i] ** 2 * $weights[$i]; } cmp_ok(abs($sumSqErrors2 - $sumSqErrors1), "<", $epsilon, 'sumSqErrors2()'); }; is($@, '', 'eval error trap'); Statistics-LineFit/Todo0100700000175500010010000000073010053226716014640 0ustar CustomerNoneTODO list for Perl module Statistics::LineFit Test and document what happens as the sum of the squared deviations of the x values approaches zero. Test and document the sensitivity of the results to random and Gaussian noise. Abstract this module into a class hierarchy that can do N-dimensional line fitting, curve fitting, cubic splines fitting, non-linear regression, etc. 
(This would require major coordination with, and major rewrites of, the other Statistics:: modules.)