RcppEigen/0000755000176200001440000000000013236624367012142 5ustar liggesusersRcppEigen/inst/0000755000176200001440000000000013236212245013103 5ustar liggesusersRcppEigen/inst/examples/0000755000176200001440000000000013100733213014712 5ustar liggesusersRcppEigen/inst/examples/lmBenchmark.R0000644000176200001440000000531013112625471017270 0ustar liggesusers## lmBenchmark.R: Benchmark different implementations of linear model solutions ## ## Copyright (C) 2011 - 2017 Douglas Bates, Dirk Eddelbuettel and Romain Francois ## ## This file is part of RcppEigen. require("stats", character=TRUE, quietly=TRUE) require("rbenchmark", character=TRUE, quietly=TRUE) require("RcppEigen", character=TRUE, quietly=TRUE) ## define different versions of lm exprs <- list() ## These versions use rank-revealing decompositions and thus can ## handle rank-deficient cases. # default version used in lm() exprs$lm.fit <- expression(stats::lm.fit(mm, y)) # versions from RcppEigen ## column-pivoted QR decomposition - similar to lm.fit exprs$PivQR <- expression(.Call("RcppEigen_fastLm_Impl", mm, y, 0L, PACKAGE="RcppEigen")) ## LDLt Cholesky decomposition with rank detection exprs$LDLt <- expression(.Call("RcppEigen_fastLm_Impl", mm, y, 2L, PACKAGE="RcppEigen")) ## SVD using the Lapack subroutine dgesdd and Eigen support exprs$GESDD <- expression(.Call("RcppEigen_fastLm_Impl", mm, y, 6L, PACKAGE="RcppEigen")) ## SVD (the JacobiSVD class from Eigen) exprs$SVD <- expression(.Call("RcppEigen_fastLm_Impl", mm, y, 4L, PACKAGE="RcppEigen")) ## eigenvalues and eigenvectors of X'X exprs$SymmEig <- expression(.Call("RcppEigen_fastLm_Impl", mm, y, 5L, PACKAGE="RcppEigen")) ## Non-rank-revealing decompositions. These work fine except when ## they don't. ## Unpivoted QR decomposition exprs$QR <- expression(.Call("RcppEigen_fastLm_Impl", mm, y, 1L, PACKAGE="RcppEigen")) ## LLt Cholesky decomposition exprs$LLt <- expression(.Call("RcppEigen_fastLm_Impl", mm, y, 3L, PACKAGE="RcppEigen")) if (suppressMessages(require("RcppArmadillo", character=TRUE, quietly=TRUE))) { exprs$arma <- expression(.Call("RcppArmadillo_fastLm", mm, y, PACKAGE="RcppArmadillo")) } if (suppressMessages(require("RcppGSL", character=TRUE, quietly=TRUE))) { exprs$GSL <- expression(.Call("RcppGSL_fastLm", mm, y, PACKAGE="RcppGSL")) } do_bench <- function(n=100000L, p=40L, nrep=20L, suppressSVD=(n > 100000L)) { mm <- cbind(1, matrix(rnorm(n * (p - 1L)), nc=p-1L)) y <- rnorm(n) if (suppressSVD) exprs <- exprs[!names(exprs) %in% c("SVD", "GSL")] cat("lm benchmark for n = ", n, " and p = ", p, ": nrep = ", nrep, "\n", sep='') do.call(benchmark, c(exprs, list(order="relative", columns = c("test", "relative", "elapsed", "user.self", "sys.self"), replications = nrep))) } print(do_bench()) sessionInfo() .Call("RcppEigen_eigen_version", FALSE, PACKAGE="RcppEigen") .Call("RcppEigen_Eigen_SSE", PACKAGE="RcppEigen") RcppEigen/inst/CITATION0000644000176200001440000000135012253717461014247 0ustar liggesuserscitHeader("To cite RcppEigen in publications use:") citEntry(entry = "Article", title = "Fast and Elegant Numerical Linear Algebra Using the {RcppEigen} Package", author = personList(as.person("Douglas Bates"), as.person("Dirk Eddelbuettel")), journal = "Journal of Statistical Software", year = "2013", volume = "52", number = "5", pages = "1--24", url = "http://www.jstatsoft.org/v52/i05/", textVersion = paste("Douglas Bates, Dirk Eddelbuettel (2013).", "Fast and Elegant Numerical Linear Algebra Using the RcppEigen Package.", "Journal of Statistical Software, 52(5), 1-24.", "URL 
http://www.jstatsoft.org/v52/i05/.") ) RcppEigen/inst/NEWS.Rd0000644000176200001440000002323513236212416014153 0ustar liggesusers\name{NEWS} \title{News for Package \pkg{RcppEigen}} \newcommand{\ghpr}{\href{https://github.com/RcppCore/RcppEigen/pull/#1}{##1}} \newcommand{\ghit}{\href{https://github.com/RcppCore/RcppEigen/issues/#1}{##1}} \section{Changes in RcppEigen version 0.3.3.4.0 (2018-02-05)}{ \itemize{ \item Updated to version 3.3.4 of Eigen (Yixuan in \ghpr{49}) \item Also carried over on new upstream (Yixuan, addressing \ghit{48}) \item As before, condition \code{long long} use on C++11. \item Pragmas for g++ & clang to suppress diagnostics messages are disabled per CRAN Policy; use \code{-Wno-ignored-attributes} to quieten. } } \section{Changes in RcppEigen version 0.3.3.3.1 (2017-11-19)}{ \itemize{ \item Compilation under Haiku-OS is now supported (Yu Gong in \ghpr{45}). \item The \code{Rcpp.plugin.maker} helper function is called via \code{::} as it is in fact exported (yet we had old code using \code{:::}). \item A spurious argument was removed from an example call. \item Travis CI now uses https to fetch the test runner script. } } \section{Changes in RcppEigen version 0.3.3.3.0 (2017-04-29)}{ \itemize{ \item Updated to version 3.3.3 of Eigen \item Fixed incorrect function names in the examples, thanks to ChingChuan Chen \item The class \code{MappedSparseMatrix} has been deprecated since Eigen 3.3.0. The new structure \code{Map >} should be used instead \item Exporters for the new type \code{Map >} were added \item Travis CI is now driven via \code{run.sh} from our forked r-travis } } \section{Changes in RcppEigen version 0.3.2.9.1 (2017-03-14)}{ \itemize{ \item Synchronize CholMod header file with Matrix package to ensure binary compatibility on all platforms (Martin Maechler in \ghpr{42}) \item Added file \code{init.c} with calls to \code{R_registerRoutines()} \code{and R_useDynamicSymbols()}; also use \code{.registration=TRUE} in \code{useDynLib} in \code{NAMESPACE} } } \section{Changes in RcppEigen version 0.3.2.9.0 (2016-08-20)}{ \itemize{ \item Updated to version 3.2.9 of Eigen (PR \ghpr{37} by Yixuan closing \ghit{36} from Bob Carpenter of the Stan team) \item An exporter for \code{RowVectorX} was added (thanks to PR \ghpr{32} by James Balamuta) } } \section{Changes in RcppEigen version 0.3.2.8.1 (2016-02-29)}{ \itemize{ \item Applied another upstream UBSAN fix (PR \ghpr{30} by Yixuan) } } \section{Changes in RcppEigen version 0.3.2.8.0 (2016-02-23)}{ \itemize{ \item Updated to version 3.2.8 of Eigen (PR \ghpr{29} by Yixuan) } } \section{Changes in RcppEigen version 0.3.2.7.0 (2016-01-18)}{ \itemize{ \item Updated to version 3.2.7 of Eigen \item One unit test file tightened to please R-devel CMD check \item The fastLm example will not include the Lapack header if MKL is defined (thanks to PR \ghpr{25} by Alexey Stukalow) } } \section{Changes in RcppEigen version 0.3.2.5.1 (2015-09-23)}{ \itemize{ \item Corrected use of \code{kitten()} thanks to Grant Brown (#21) \item Applied upstream change to protect against undefined behaviour with null pointers } } \section{Changes in RcppEigen version 0.3.2.5.0 (2015-07-11)}{ \itemize{ \item Updated to version 3.2.5 of Eigen \item Fixed some incorrect error messages stemming from Eigen code. \item Updated package to current \code{R CMD check} standards. 
} } \section{Changes in RcppEigen version 0.3.2.4.0 (2015-02-23)}{ \itemize{ \item Updated to version 3.2.4 of Eigen \item Update \code{RcppEigen.package.skeleton()} to use \CRANpkg{pkgKitten} if available } } \section{Changes in RcppEigen version 0.3.2.3.0 (2014-12-22)}{ \itemize{ \item Updated to version 3.2.3 of Eigen \item Added a number of additional unit tests for \code{wrap}, \code{transform} and \code{sparse} \item Updated one of the examples } } \section{Changes in RcppEigen version 0.3.2.2.0 (2014-08-19)}{ \itemize{ \item Updated to version 3.2.2 of Eigen \item \code{Rcpp::as()} now supports the conversion from R vector to \dQuote{row array}, i.e., \code{Eigen::Array} \item \code{Rcpp::as()} now supports the conversion from \code{dgRMatrix} (row oriented sparse matrices, defined in \pkg{Matrix} package) to \code{Eigen::MappedSparseMatrix} \item Conversion from R matrix to \code{Eigen::MatrixXd} and \code{Eigen::ArrayXXd} using \code{Rcpp::as()} no longer gives compilation errors } } \section{Changes in RcppEigen version 0.3.2.1.2 (2014-05-05)}{ \itemize{ \item Applied upstream patch to not trigger g++ UBSAN warning on the cpu id comparison, with thanks to Gael Guennebaud for the patch } } \section{Changes in RcppEigen version 0.3.2.1.1 (2014-03-06)}{ \itemize{ \item Better \code{ifdef} on one directory entry feature, with thanks to Brian Ripley. } } \section{Changes in RcppEigen version 0.3.2.1.0 (2014-03-03)}{ \itemize{ \item Updated to version 3.2.1 of Eigen } } \section{Changes in RcppEigen version 0.3.2.0.3 (2014-03-01)}{ \itemize{ \item Updated and extended \code{RcppEigen.package.skeleton()} to use several examples via \CRANpkg{Rcpp} Attributes; also removed the deprecated \code{namespace} argument \item Updated skeleton package example for \CRANpkg{Rcpp} 0.11.0 or later by removing needed for linking with user library \item Updated files \code{DESCRIPTION}, \code{NAMESPACE}, \code{src/Makevars} and \code{src/Makevars.win} similarly } } \section{Changes in RcppEigen version 0.3.2.0.2 (2014-01-26)}{ \itemize{ \item Converted three unused unit test files to \CRANpkg{RUnit} and removed \code{Suggests:} of \CRANpkg{testthat} \item Add declaration to import a symbol from \CRANpkg{Rcpp} to \code{NAMESPACE} to ensure proper instantiation with the upcoming \CRANpkg{Rcpp} version \item Retire \code{SHLIB.maker} function } } \section{Changes in RcppEigen version 0.3.2.0.1 (2013-12-18)}{ \itemize{ \item New maintainer -- with a big thanks to Doug for all his work \item Applied two small patches to deal with non-g++ compilrs \item Clarifications concerning license and authorship of Eigen (as opposed to RcppEigen) code added to \code{DESCRIPTION} at the request of CRAN } } \section{Changes in RcppEigen version 0.3.2.0 (2013-11-13)}{ \itemize{ \item Update to version 3.2.0 of Eigen } } \section{Changes in RcppEigen version 0.3.1.2.3 (2013-10-25)}{ \itemize{ \item Fix to RcppEigenCholmod.h to incorporate changes in the cholmod_factor struct. These changes are necessary if code compiled against RcppEigen that uses CHOLMOD factors is to be run with versions of the Matrix package >= 1.1-0 } } \section{Changes in RcppEigen version 0.3.1.2 (2012-11-29)}{ \itemize{ \item Upgraded to Eigen 3.1.2 \item Fixes to RcppEigenWrap.h and adjustment of tests accordingly. The changes allow RowMajor matrices to be wrapped (thanks to Gael Guennebaud) but cannot handle RowVector types. 
There will need to be more template metaprogramming done to redirect the case of RowVector, which cannot be changed to a ColMajor form. \item Because of changes in R, -DNDEBUG is automatic. One must override it with -UNDEBUG in the local ~/.R/Makevars to activate the debugging code. \item New (unexported) functions CxxFlags() and RcppEigenCxxFlags() for use in Makefiles \item Fixes related to Rcpp 0.10.* } } \section{Changes in RcppEigen version 0.3.1 (2012-08-07)}{ \itemize{ \item Upgraded to Eigen 3.1.0 \item Removed the "unsupported" Eigen module AutoDiff which defined a macro "sign" that conflicted with a function in the R API (which really should be visible as "Rf_sign", not sure why it shows up as "sign" and don't have time to investigate) \item Commented out several tests involving complex vectors and matrices. Again there are compilation problems related to conflicting definitions in the std:: namespace and the R API and Eigen, which I don't have time to investigate. } } \section{Changes in RcppEigen version 0.2.0 (2012-03-12)}{ \itemize{ \item Upgraded the version of Eigen to 3.1.0-alpha2, in which the sparse matrix modules are now in the "supported" tree. \item Added several "unsupported" Eigen modules including \itemize{ \item AutoDiff (a small automatic differentiation package adapted to vectors and matrices) \item IterativeSolvers (iterative linear and nonlinear solver algorithms) \item KroneckerProduct (as the name implies) \item MatrixFunctions (matrix cos, exp, log, sin, sinh, etc.) \item NonlinearOptimization (based on minpack but uses reverse communication - yay!) \item NumericalDiff (numerical differentiation of vector-valued or matrix-valued functions) \item Polynomials (polynomial representation and solution using a QR algorithm) \item Skyline (sparse skyline matrices useful in finite-element codes) \item SparseExtra (dynamic sparse matrices, now deprecated, and Matrix Market I/O functions) \item Splines (multidimensional spline representations and spline interpolation) } \item At present all these modules, including the MatrixFunctions module, are included with RcppEigen.h but that may change if too many people get unexpected results from A.exp() \item The ability to wrap RowMajor sparse matrices and to use as etc. \item Migrated some tests to the testthat package. Currently there is some difficulty with combining testthat, inline and R CMD check. } } RcppEigen/inst/COPYRIGHTS0000644000176200001440000006535312254314420014532 0ustar liggesusersFiles: inst/include/Eigen/* inst/include/unsupported/Eigen/* Copyright: 2006 - 2012 Gael Guennebaud, Benoit Jacob License: MPL-2 Files: unsupported/Eigen/FFT unsupported/Eigen/src/FFT/ei_fftw_impl.h unsupported/Eigen/src/FFT/ei_kissfft_impl.h Copyright: 2003 - 2009 Mark Borgerding License: MPL-2 Files: inst/include/Eigen/src/SuperLUSupport/SuperLUSupport.h Copyright: 1994 Xerox Corporation License: THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. Files: inst/include/unsupported/Eigen/src/IterativeSolvers/IterationController.h Copyright: 1997 - 2001 Andrew Lumsdaine and Lie-Quan Lee License: This file is part of the Iterative Template Library You should have received a copy of the License Agreement for the Iterative Template Library along with the software; see the file LICENSE. 
Permission to modify the code and to distribute modified code is granted, provided the text of this NOTICE is retained, a notice that the code was modified is included with the above COPYRIGHT NOTICE and with the COPYRIGHT NOTICE in the LICENSE file, and that the LICENSE file is distributed with the modified code. LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. By way of example, but not limitation, Licensor MAKES NO REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE LICENSED SOFTWARE COMPONENTS OR DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS. Files: inst/include/unsupported/Eigen/Splines inst/include/unsupported/Eigen/src/Splines/SplineFitting.h inst/include/unsupported/Eigen/src/Splines/SplineFwd.h inst/include/unsupported/Eigen/src/Splines/Spline.h inst/include/Eigen/src/Geometry/Umeyama.h inst/include/Eigen/src/StlSupport/details.h inst/include/Eigen/src/StlSupport/StdDeque.h inst/include/Eigen/src/StlSupport/StdList.h inst/include/Eigen/src/StlSupport/StdVector.h inst/include/Eigen/StdDeque inst/include/Eigen/StdList inst/include/Eigen/StdVector inst/include/Eigen/src/Core/DenseStorage.h inst/include/Eigen/src/Core/util/Memory.h inst/include/Eigen/src/Geometry/Transform.h Copyright: 2009 - 2011 Hauke Heibel License: MPL-2 Files: inst/include/Eigen/src/LU/arch/Inverse_SSE.h inst/include/Eigen/src/Cholesky/LLT_MKL.h inst/include/Eigen/src/Core/Assign_MKL.h inst/include/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h inst/include/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h inst/include/Eigen/src/Core/products/GeneralMatrixVector_MKL.h inst/include/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h inst/include/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h inst/include/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h inst/include/Eigen/src/Core/products/TriangularMatrixVector_MKL.h inst/include/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h inst/include/Eigen/src/Core/util/MKL_support.h inst/include/Eigen/src/Eigenvalues/ComplexSchur_MKL.h inst/include/Eigen/src/Eigenvalues/RealSchur_MKL.h inst/include/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h inst/include/Eigen/src/LU/PartialPivLU_MKL.h inst/include/Eigen/src/PardisoSupport/PardisoSupport.h inst/include/Eigen/src/QR/ColPivHouseholderQR_MKL.h inst/include/Eigen/src/QR/HouseholderQR_MKL.h inst/include/Eigen/src/SVD/JacobiSVD_MKL.h Copyright: 2001 Intel Corporation License: Permition is granted to use, copy, distribute and prepare derivative works of this library for any purpose and without fee, provided, that the above copyright notice and this statement appear in all copies. Intel makes no representations about the suitability of this software for any purpose, and specifically disclaims all warranties. Files: inst/include/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h Copyright: 2002 - 2007 Yves Renard License: LPGL-2.1 Files: inst/include/Eigen/src/OrderingMethods/Amd.h Copyright: 2006 Timothy A. 
Davis License: LGPL-2.1 Files: inst/include/Eigen/src/Core/arch/SSE/MathFunctions.h Copyright: 2007 Julien Pommier License: MPL-2 Files: inst/include/Eigen/src/Core/Assign.h Copyright: 2007 Michael Olbrich License: MPL-2 Files: inst/include/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h inst/include/unsupported/Eigen/src/Skyline/SkylineMatrix.h inst/include/unsupported/Eigen/src/Skyline/SkylineProduct.h inst/include/unsupported/Eigen/src/Skyline/SkylineStorage.h inst/include/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h inst/include/unsupported/Eigen/src/Skyline/SkylineUtil.h Copyright: 2008 - 2009 Guillaume Saupin ; License: MPL-2 Files: inst/include/Eigen/src/Core/arch/AltiVec/PacketMath.h inst/include/Eigen/src/Core/arch/NEON/PacketMath.h Copyright: 2008 Konstantinos Margaritis License: MPL-2 Files: inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h inst/include/unsupported/Eigen/src/MatrixFunction.h inst/include/unsupported/Eigen/MatrixFunctions inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h inst/include/Eigen/src/Eigenvalues/ComplexEigenSolver.h inst/include/Eigen/src/Eigenvalues/ComplexSchur.h inst/include/Eigen/src/Eigenvalues/EigenSolver.h inst/include/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h inst/include/Eigen/src/Eigenvalues/HessenbergDecomposition.h inst/include/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h inst/include/Eigen/src/Eigenvalues/RealSchur.h inst/include/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h inst/include/Eigen/src/Eigenvalues/Tridiagonalization.h inst/include/unsupported/Eigen/src/MatrixFunctions/StemFunction.h inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h Copyright: 2009 - 2010 Jitse Niesen License: MPL-2 Files: inst/include/Eigen/src/Eigenvalues/ComplexEigenSolver.h inst/include/Eigen/src/Eigenvalues/ComplexSchur.h Copyright: 2009 Claire Maurice License: MPL-2 Files: inst/include/unsupported/Eigen/BVH inst/include/unsupported/Eigen/src/BVH/BVAlgorithms.h inst/include/unsupported/Eigen/src/BVH/KdBVH.h Copyright: 2009 Ilya Baran License: MPL-2 Files: inst/include/Eigen/src/Cholesky/LDLT.h Copyright: 2009 Keir Mierle License: MPL-2 Files: inst/include/Eigen/src/Geometry/Quaternion.h Copyright: 2009 Mathieu Gautier License: MPL-2 Files: inst/include/Eigen/src/Geometry/arch/Geometry_SSE.h inst/include/unsupported/Eigen/src/MoreVectorization/MathFunctions.h Copyright: 2009 Rohit Garg License: MPL-2 Files: inst/include/unsupported/Eigen/NonLinearOptimization inst/include/unsupported/Eigen/NumericalDiff inst/include/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h inst/include/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h inst/include/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h inst/include/Eigen/src/Core/util/Memory.h Copyright: 2009 - 2010 Thomas Capricelli License: MPL-2 Files: inst/include/Eigen/src/SparseCore/SparseView.h inst/include/Eigen/src/Sparse/SparseView.h Copyright: 2010 Daniel Lowengrub License: MPL-2 Files: inst/include/unsupported/Eigen/src/Polynomials/Companion.h inst/include/unsupported/Eigen/src/Polynomials/PolynomialSolver.h inst/include/unsupported/Eigen/src/Polynomials/PolynomialUtils.h Copyright: 2010 Manuel Yguel License: MPL-2 Files: inst/include/Eigen/src/Householder/BlockHouseholder.h inst/include/Eigen/src/QR/HouseholderQR.h Copyright: 2010 Vincent Lejeune License: MPL-2 Files: 
inst/include/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h Copyright: 2011 Andreas Platen License: MPL-2 Files: inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h Copyright: 2011 Chen-Pang He License: MPL-2 Files: inst/include/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h inst/include/unsupported/Eigen/src/IterativeSolvers/GMRES.h Copyright: 2012 Kolja Brix License: MPL-2 Files: inst/include/Eigen/src/Cholesky/LDLT.h Copyright: 2011 Timothy E. Holy License: MPL-2 Files: inst/include/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h inst/include/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h inst/include/Eigen/src/PaStiXSupport/PaStiXSupport.h inst/include/unsupported/Eigen/src/IterativeSolvers/Scaling.h inst/include/unsupported/Eigen/src/SparseExtra/MarketIO.h inst/include/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h Copyright: 2012 Désiré Nuentsa-Wakam License: MPL-2 Files: * Copyright: 2011 - 2013 Douglas Bates, Romain Francois and Dirk Eddelbuettel License: GPL-2 Files: debian/* Copyright: 2013 Dirk Eddelbuettel License: GPL-2 GNU R ships with various open source licenses including - the GPL license (version 2), use RShowDoc("GPL-2") to display it - the LGPL license (version 2.1), use RShowDoc("LGPL-2.1") to display it The MPL-2 is included below. License: MPL-2 Mozilla Public License Version 2.0 ================================== . 1. Definitions -------------- . 1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. . 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. . 1.3. "Contribution" means Covered Software of a particular Contributor. . 1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. . 1.5. "Incompatible With Secondary Licenses" means . (a) that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or . (b) that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. . 1.6. "Executable Form" means any form of the work other than Source Code Form. . 1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. . 1.8. "License" means this document. . 1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. . 1.10. "Modifications" means any of the following: . (a) any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or . (b) any new file in Source Code Form that contains any Covered Software. . 1.11. 
"Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. . 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. . 1.13. "Source Code Form" means the form of the work preferred for making modifications. . 1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. . 2. License Grants and Conditions -------------------------------- . 2.1. Grants . Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: . (a) under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and . (b) under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. . 2.2. Effective Date . The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. . 2.3. Limitations on Grant Scope . The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: . (a) for any code that a Contributor has removed from Covered Software; or . (b) for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or . (c) under Patent Claims infringed by Covered Software in the absence of its Contributions. . This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). . 2.4. Subsequent Licenses . No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). . 2.5. Representation . Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. . 2.6. Fair Use . 
This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. . 2.7. Conditions . Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. . 3. Responsibilities ------------------- . 3.1. Distribution of Source Form . All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. . 3.2. Distribution of Executable Form . If You distribute Covered Software in Executable Form then: . (a) such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and . (b) You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. . 3.3. Distribution of a Larger Work . You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). . 3.4. Notices . You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. . 3.5. Application of Additional Terms . You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. . 4. Inability to Comply Due to Statute or Regulation --------------------------------------------------- . 
If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. . 5. Termination -------------- . 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. . 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. . 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. . ************************************************************************ * * * 6. Disclaimer of Warranty * * ------------------------- * * * * Covered Software is provided under this License on an "as is" * * basis, without warranty of any kind, either expressed, implied, or * * statutory, including, without limitation, warranties that the * * Covered Software is free of defects, merchantable, fit for a * * particular purpose or non-infringing. The entire risk as to the * * quality and performance of the Covered Software is with You. * * Should any Covered Software prove defective in any respect, You * * (not any Contributor) assume the cost of any necessary servicing, * * repair, or correction. This disclaimer of warranty constitutes an * * essential part of this License. No use of any Covered Software is * * authorized under this License except under this disclaimer. * * * ************************************************************************ . ************************************************************************ * * * 7. 
Limitation of Liability * * -------------------------- * * * * Under no circumstances and under no legal theory, whether tort * * (including negligence), contract, or otherwise, shall any * * Contributor, or anyone who distributes Covered Software as * * permitted above, be liable to You for any direct, indirect, * * special, incidental, or consequential damages of any character * * including, without limitation, damages for lost profits, loss of * * goodwill, work stoppage, computer failure or malfunction, or any * * and all other commercial damages or losses, even if such party * * shall have been informed of the possibility of such damages. This * * limitation of liability shall not apply to liability for death or * * personal injury resulting from such party's negligence to the * * extent applicable law prohibits such limitation. Some * * jurisdictions do not allow the exclusion or limitation of * * incidental or consequential damages, so this exclusion and * * limitation may not apply to You. * * * ************************************************************************ . 8. Litigation ------------- . Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. . 9. Miscellaneous ---------------- . This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. . 10. Versions of the License --------------------------- . 10.1. New Versions . Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. . 10.2. Effect of New Versions . You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. . 10.3. Modified Versions . If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). . 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses . If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. . Exhibit A - Source Code Form License Notice ------------------------------------------- . This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. . 
If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. . You may add additional accurate notices of copyright ownership. . Exhibit B - "Incompatible With Secondary Licenses" Notice --------------------------------------------------------- . This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. RcppEigen/inst/doc/0000755000176200001440000000000013236220366013653 5ustar liggesusersRcppEigen/inst/doc/RcppEigen-Introduction.Rnw0000644000176200001440000024374712253717461020724 0ustar liggesusers\documentclass[shortnames,article,nojss]{jss} \usepackage{booktabs,bm,amsmath,thumbpdf} %\VignetteIndexEntry{RcppEigen-intro} %\VignetteKeywords{linear algebra, template programming, C++, R, Rcpp} %\VignettePackage{RcppEigen} %% VIGNETTE <>= pkgVersion <- packageDescription("RcppEigen")$Version pkgDate <- packageDescription("RcppEigen")$Date prettyDate <- format(Sys.Date(), "%B %e, %Y") #require("RcppEigen") #eigenVersion <- paste(unlist(.Call("eigen_version", FALSE)), collapse=".") @ \author{Douglas Bates\\University of Wisconsin-Madison \And Dirk Eddelbuettel\\Debian Project} \Plainauthor{Douglas Bates, Dirk Eddelbuettel} \title{Fast and Elegant Numerical Linear Algebra Using the \pkg{RcppEigen} Package} \Plaintitle{Fast and Elegant Numerical Linear Algebra Using the RcppEigen Package} \Shorttitle{Fast and Elegant Numerical Linear Algebra with \pkg{RcppEigen}} \Abstract{ The \pkg{RcppEigen} package provides access from \proglang{R} \citep{R:Main} to the \pkg{Eigen} \citep*{Eigen:Web} \proglang{C++} template library for numerical linear algebra. \pkg{Rcpp} \citep{CRAN:Rcpp,Eddelbuettel:2013:Rcpp} classes and specializations of the \proglang{C++} templated functions \code{as} and \code{wrap} from \pkg{Rcpp} provide the ``glue'' for passing objects from \proglang{R} to \proglang{C++} and back. Several introductory examples are presented. This is followed by an in-depth discussion of various available approaches for solving least-squares problems, including rank-revealing methods, concluding with an empirical run-time comparison. Last but not least, sparse matrix methods are discussed. 
} \Keywords{linear algebra, template programming, \proglang{R}, \proglang{C++}, \pkg{Rcpp}} \Plainkeywords{linear algebra, template programmig, R, C++, Rcpp} \Address{ Douglas Bates \\ Department of Statistics \\ University of Wisconsin-Madison \\ Madison, WI, United States of America \\ E-mail: \email{bates@stat.wisc.edu} \\ URL: \url{http://www.stat.wisc.edu/~bates/}\\ Dirk Eddelbuettel \\ Debian Project \\ River Forest, IL, United States of America\\ E-mail: \email{edd@debian.org}\\ URL: \url{http://dirk.eddelbuettel.com}\\ } \usepackage{Sweave} \newcommand{\argmin}{\operatorname{argmin}\displaylimits} \newcommand{\rank}{\operatorname{rank}} %% highlights macros %% Style definition file generated by highlight 2.7, http://www.andre-simon.de/ % Highlighting theme definition: \newcommand{\hlstd}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\hlnum}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\hlopt}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\hlesc}[1]{\textcolor[rgb]{0.74,0.55,0.55}{#1}} \newcommand{\hlstr}[1]{\textcolor[rgb]{0.90,0.15,0.15}{#1}} \newcommand{\hldstr}[1]{\textcolor[rgb]{0.74,0.55,0.55}{#1}} \newcommand{\hlslc}[1]{\textcolor[rgb]{0.67,0.13,0.13}{\it{#1}}} \newcommand{\hlcom}[1]{\textcolor[rgb]{0.67,0.13,0.13}{\it{#1}}} \newcommand{\hldir}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\hlsym}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\hlline}[1]{\textcolor[rgb]{0.33,0.33,0.33}{#1}} \newcommand{\hlkwa}[1]{\textcolor[rgb]{0.61,0.13,0.93}{\bf{#1}}} \newcommand{\hlkwb}[1]{\textcolor[rgb]{0.13,0.54,0.13}{#1}} \newcommand{\hlkwc}[1]{\textcolor[rgb]{0,0,1}{#1}} \newcommand{\hlkwd}[1]{\textcolor[rgb]{0,0,0}{#1}} \definecolor{bgcolor}{rgb}{1,1,1} % ------------------------------------------------------------------------ \begin{document} \SweaveOpts{engine=R,eps=FALSE} \begin{quote} \footnotesize This vignette corresponds to a \href{http://www.jstatsoft.org/v52/i05/}{paper published} in the \textsl{Journal of Statistical Software}. Currently still identical to the paper, this vignette version may over time receive minor updates. For citations, please use \citet{JSS:RcppEigen} as provided by \code{citation("RcppEigen")}. This version corresponds to \pkg{RcppEigen} version \Sexpr{pkgVersion} and was typeset on \Sexpr{prettyDate}. \end{quote} \section{Introduction} \label{sec:intro} Linear algebra is an essential building block of statistical computing. Operations such as matrix decompositions, linear program solvers, and eigenvalue/eigenvector computations are used in many estimation and analysis routines. As such, libraries supporting linear algebra have long been provided by statistical programmers for different programming languages and environments. Because it is object-oriented, \proglang{C++}, one of the central modern languages for numerical and statistical computing, is particularly effective at representing matrices, vectors and decompositions, and numerous class libraries providing linear algebra routines have been written over the years. As both the \proglang{C++} language and standards have evolved \citep{Meyers:2005:EffectiveC++,Meyers:1995:MoreEffectiveC++,Cpp11}, so have the compilers implementing the language. Relatively modern language constructs such as template meta-programming are particularly useful because they provide overloading of operations (allowing expressive code in the compiled language similar to what can be done in scripting languages) and can shift some of the computational burden from the run-time to the compile-time. 
(A more detailed discussion of template meta-programming is, however, beyond the scope of this paper). \cite{Veldhuizen:1998:Blitz} provided an early and influential implementation of numerical linear algebra classes that already demonstrated key features of this approach. Its usage was held back at the time by the limited availability of compilers implementing all the necessary features of the \proglang{C++} language. This situation has greatly improved over the last decade, and many more libraries have been made available. One such \proglang{C++} library is \pkg{Eigen} by \citet*{Eigen:Web} which started as a sub-project to KDE (a popular Linux desktop environment), initially focussing on fixed-size matrices to represent projections in a visualization application. \pkg{Eigen} grew from there and has over the course of about a decade produced three major releases with \pkg{Eigen}3 being the current major version. To check the minor and patch version numbers, load the \pkg{RcppEigen} package and call \begin{CodeInput} R> .Call("eigen_version", FALSE) \end{CodeInput} \begin{CodeOutput} major minor patch 3 1 1 \end{CodeOutput} \pkg{Eigen} is of interest as the \proglang{R} system for statistical computation and graphics \citep{R:Main} is itself easily extensible. This is particular true via the \proglang{C} language that most of \proglang{R}'s compiled core parts are written in, but also for the \proglang{C++} language which can interface with \proglang{C}-based systems rather easily. The manual ``Writing \proglang{R} Extensions'' \citep{R:Extensions} is the basic reference for extending \proglang{R} with either \proglang{C} or \proglang{C++}. The \pkg{Rcpp} package by \citet{JSS:Rcpp,CRAN:Rcpp} facilitates extending \proglang{R} with \proglang{C++} code by providing seamless object mapping between both languages. % As stated in the \pkg{Rcpp} \citep{CRAN:Rcpp} vignette, ``Extending \pkg{Rcpp}'' as well as in \citet{Eddelbuettel:2013:Rcpp} \begin{quote} \pkg{Rcpp} facilitates data interchange between \proglang{R} and \proglang{C++} through the templated functions \code{Rcpp::as} (for conversion of objects from \proglang{R} to \proglang{C++}) and \code{Rcpp::wrap} (for conversion from \proglang{C++} to \proglang{R}). \end{quote} The \pkg{RcppEigen} package provides the header files composing the \pkg{Eigen} \proglang{C++} template library, along with implementations of \code{Rcpp::as} and \code{Rcpp::wrap} for the \proglang{C++} classes defined in \pkg{Eigen}. The \pkg{Eigen} classes themselves provide high-performance, versatile and comprehensive representations of dense and sparse matrices and vectors, as well as decompositions and other functions to be applied to these objects. The next section introduces some of these classes and shows how to interface to them from \proglang{R}. \section[Eigen classes]{\pkg{Eigen} classes} \label{sec:eclasses} \pkg{Eigen} is a \proglang{C++} template library providing classes for many forms of matrices, vectors, arrays and decompositions. These classes are flexible and comprehensive allowing for both high performance and well structured code representing high-level operations. \proglang{C++} code based on \pkg{Eigen} is often more like \proglang{R} code, working on the ``whole object'', than like compiled code in other languages where operations often must be coded in loops. As in many \proglang{C++} template libraries using template meta-programming \citep{Abrahams+Gurtovoy:2004:TemplateMetaprogramming}, the templates themselves can be very complicated. 
However, \pkg{Eigen} provides \code{typedef}s for common classes that correspond to \proglang{R} matrices and vectors, as shown in Table~\ref{tab:REigen}, and this paper will use these \code{typedef}s. \begin{table}[t!] \centering \begin{tabular}{l l} \hline \multicolumn{1}{c}{\proglang{R} object type} & \multicolumn{1}{c}{\pkg{Eigen} class typedef}\\ \hline numeric matrix & \code{MatrixXd}\\ integer matrix & \code{MatrixXi}\\ complex matrix & \code{MatrixXcd}\\ numeric vector & \code{VectorXd}\\ integer vector & \code{VectorXi}\\ complex vector & \code{VectorXcd}\\ \code{Matrix::dgCMatrix} \phantom{XXX} & \code{SparseMatrix}\\ \hline \end{tabular} \caption{Correspondence between \proglang{R} matrix and vector types and classes in the \pkg{Eigen} namespace. \label{tab:REigen}} \end{table} Here, \code{Vector} and \code{Matrix} describe the dimension of the object. The \code{X} signals that these are dynamically-sized objects (as opposed to fixed-size matrices such as $3 \times 3$ matrices also available in \pkg{Eigen}). Lastly, the trailing characters \code{i}, \code{d} and \code{cd} denote, respectively, storage types \code{integer}, \code{double} and \code{complex double}. The \proglang{C++} classes shown in Table~\ref{tab:REigen} are in the \pkg{Eigen} namespace, which means that they must be written as \code{Eigen::MatrixXd}. However, if one prefaces the use of these class names with a declaration like \begin{quote} \noindent \ttfamily \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXd}\hlopt{;}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \end{quote} \vspace*{-0.4cm} then one can use these names without the namespace qualifier. \subsection[Mapped matrices in Eigen]{Mapped matrices in \pkg{Eigen}} \label{sec:mapped} Storage for the contents of matrices from the classes shown in Table~\ref{tab:REigen} is allocated and controlled by the class constructors and destructors. Creating an instance of such a class from an \proglang{R} object involves copying its contents. An alternative is to have the contents of the \proglang{R} matrix or vector mapped to the contents of the object from the \pkg{Eigen} class. For dense matrices one can use the \pkg{Eigen} templated class \code{Map}, and for sparse matrices one can deploy the \pkg{Eigen} templated class \code{MappedSparseMatrix}. One must, of course, be careful not to modify the contents of the \proglang{R} object in the \proglang{C++} code. A recommended practice is always to declare mapped objects as {\ttfamily\hlkwb{const}\normalfont}. \subsection[Arrays in Eigen]{Arrays in \pkg{Eigen}} \label{sec:arrays} For matrix and vector classes \pkg{Eigen} overloads the \code{`*'} operator as matrix multiplication. Occasionally component-wise operations instead of matrix operations are desired, for which the \code{Array} templated classes are used in \pkg{Eigen}. Switching back and forth between matrices or vectors and arrays is usually done via the \code{array()} method for matrix or vector objects and the \code{matrix()} method for arrays. \subsection[Structured matrices in Eigen]{Structured matrices in \pkg{Eigen}} \label{sec:structured} \pkg{Eigen} provides classes for matrices with special structure such as symmetric matrices, triangular matrices and banded matrices. For dense matrices, these special structures are described as ``views'', meaning that the full dense matrix is stored but only part of the matrix is used in operations. 
For a symmetric matrix one must specify whether the lower triangle or the upper triangle is to be used as the contents, with the other triangle defined by the implicit symmetry. \section{Some simple examples} \label{sec:simple} \proglang{C++} functions to perform simple operations on matrices or vectors can follow a pattern of: \begin{enumerate} \item Map the \proglang{R} objects passed as arguments into \pkg{Eigen} objects. \item Create the result. \item Return \code{Rcpp::wrap} applied to the result. \end{enumerate} An idiom for the first step is % using Eigen::Map; % using Eigen::MatrixXd; % using Rcpp::as; % % const Map A(as >(AA)); %\end{lstlisting} \begin{quote} \noindent \ttfamily \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlsym{::}\hlstd{Map}\hlsym{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlsym{::}\hlstd{MatrixXd}\hlsym{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using\ }\hlstd{Rcpp}\hlsym{::}\hlstd{as}\hlsym{;}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwb{const\ }\hlstd{Map}\hlsym{$<$}\hlstd{MatrixXd}\hlsym{$>$}\hlstd{\ \ }\hlsym{}\hlstd{}\hlkwd{A}\hlstd{}\hlsym{(}\hlstd{as}\hlsym{$<$}\hlstd{Map}\hlsym{$<$}\hlstd{MatrixXd}\hlsym{$>$\ $>$(}\hlstd{AA}\hlsym{));}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \end{quote} \vspace*{-0.4cm} where \code{AA} is the name of the \proglang{R} object (of type \code{SEXP} in \proglang{C} and \proglang{C++}) passed to the \proglang{C++} function. An alternative to the \code{using} declarations is to declare a \code{typedef} as in % typedef Eigen::Map MapMatd; % const MapMatd A(Rcpp::as(AA)); \begin{quote} \noindent \ttfamily \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{Map}\hlopt{$<$}\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXd}\hlopt{$>$}\hlstd{\ \ \ }\hlopt{}\hlstd{MapMatd}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MapMatd}\hlstd{\ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{A}\hlstd{}\hlopt{(}\hlstd{Rcpp}\hlopt{::}\hlstd{as}\hlopt{$<$}\hlstd{MapMatd}\hlopt{$>$(}\hlstd{AA}\hlopt{));}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \end{quote} \vspace*{-0.4cm} The \code{cxxfunction} function from the \pkg{inline} package \citep*{CRAN:inline} for \proglang{R} and its \pkg{RcppEigen} plugin provide a convenient method of developing and debugging the \proglang{C++} code. For actual production code one generally incorporates the \proglang{C++} source code files in a package and includes the line \code{LinkingTo: Rcpp, RcppEigen} in the package's \code{DESCRIPTION} file. The \code{RcppEigen.package.skeleton} function provides a quick way of generating the skeleton of a package that will use \pkg{RcppEigen}. The \code{cxxfunction} with the \code{"Rcpp"} or \code{"RcppEigen"} plugins has the \code{as} and \code{wrap} functions already defined as \code{Rcpp::as} and \code{Rcpp::wrap}. In the examples below these declarations are omitted. It is important to remember that they are needed in actual \proglang{C++} source code for a package. The first few examples are simply for illustration as the operations shown could be more effectively performed directly in \proglang{R}. Finally, the results from \pkg{Eigen} are compared to those from the direct \proglang{R} results. 
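Before turning to those examples, here is a minimal sketch that puts the three
steps together in one place. The names \code{matvecCpp}, \code{AA} and
\code{xx} are purely illustrative and do not appear elsewhere in this paper;
the body computes the product of a numeric matrix and a numeric vector:
\begin{verbatim}
using Eigen::Map;
using Eigen::MatrixXd;
using Eigen::VectorXd;
                     // Step 1: map the R objects AA and xx into Eigen objects
const Map<MatrixXd>  A(as<Map<MatrixXd> >(AA));
const Map<VectorXd>  x(as<Map<VectorXd> >(xx));
                     // Step 2: create the result
const VectorXd       y(A * x);
                     // Step 3: return wrap() applied to the result
return wrap(y);
\end{verbatim}
Stored in a character variable and compiled with
\code{cxxfunction(signature(AA = "matrix", xx = "numeric"), matvecCpp, plugin = "RcppEigen")},
the resulting function should agree with \code{drop(A \%*\% x)} in \proglang{R}.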
\subsection{Transpose of an integer matrix} \label{sec:transpose} The next \proglang{R} code snippet creates a simple matrix of integers \begin{CodeInput} R> (A <- matrix(1:6, ncol = 2)) \end{CodeInput} \begin{CodeOutput} [,1] [,2] [1,] 1 4 [2,] 2 5 [3,] 3 6 \end{CodeOutput} \begin{CodeInput} R> str(A) \end{CodeInput} \begin{CodeOutput} int [1:3, 1:2] 1 2 3 4 5 6 \end{CodeOutput} and, in Figure~\ref{trans}, the \code{transpose()} method for the \code{Eigen::MatrixXi} class is used to return the transpose of the supplied matrix. The \proglang{R} matrix in the \code{SEXP} \code{AA} is first mapped to an \code{Eigen::MatrixXi} object, and then the matrix \code{At} is constructed from its transpose and returned to \proglang{R}. \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlsym{::}\hlstd{Map}\hlsym{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlsym{::}\hlstd{MatrixXi}\hlsym{;}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlslc{//\ Map\ the\ integer\ matrix\ AA\ from\ R}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{Map}\hlsym{$<$}\hlstd{MatrixXi}\hlsym{$>$}\hlstd{\ \ }\hlsym{}\hlstd{}\hlkwd{A}\hlstd{}\hlsym{(}\hlstd{as}\hlsym{$<$}\hlstd{Map}\hlsym{$<$}\hlstd{MatrixXi}\hlsym{$>$\ $>$(}\hlstd{AA}\hlsym{));}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlslc{//\ evaluate\ and\ return\ the\ transpose\ of\ A}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MatrixXi}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlkwd{At}\hlstd{}\hlsym{(}\hlstd{A}\hlsym{.}\hlstd{}\hlkwd{transpose}\hlstd{}\hlsym{());}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{}\hlkwd{wrap}\hlstd{}\hlsym{(}\hlstd{At}\hlsym{);}\hlstd{}\hspace*{\fill} \normalfont \hrule \caption{\code{transCpp}: Transpose a matrix of integers. \label{trans}} \end{figure} The \proglang{R} snippet below compiles and links the \proglang{C++} code segment. The actual work is done by the function \code{cxxfunction} from the \pkg{inline} package which compiles, links and loads code written in \proglang{C++} and supplied as a character variable. This frees the user from having to know about compiler and linker details and options, which makes ``exploratory programming'' much easier. Here the program piece to be compiled is stored as a character variable named \code{transCpp}, and \code{cxxfunction} creates an executable function which is assigned to \code{ftrans}. This new function is used on the matrix $\bm A$ shown above, and one can check that it works as intended by comparing the output to an explicit transpose of the matrix argument. \begin{CodeInput} R> ftrans <- cxxfunction(signature(AA = "matrix"), transCpp, + plugin = "RcppEigen") R> (At <- ftrans(A)) \end{CodeInput} \begin{CodeOutput} [,1] [,2] [,3] [1,] 1 2 3 [2,] 4 5 6 \end{CodeOutput} \begin{CodeInput} R> stopifnot(all.equal(At, t(A))) \end{CodeInput} For numeric or integer matrices the \code{adjoint()} method is equivalent to the \code{transpose()} method. For complex matrices, the adjoint is the conjugate of the transpose. In keeping with the conventions in the \pkg{Eigen} documentation the \code{adjoint()} method is used in what follows to create the transpose of numeric or integer matrices. \subsection{Products and cross-products} \label{sec:products} As mentioned in Section~\ref{sec:arrays}, the \code{`*'} operator is overloaded as matrix multiplication for the various matrix and vector classes in \pkg{Eigen}. 
The \proglang{C++} code in Figure~\ref{prod} produces a list containing both the product and cross-product (in the sense of the \proglang{R} function call \code{crossproduct(A, B)} evaluating $\bm A^\top\bm B$) of its two arguments % \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{Map}\hlopt{$<$}\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXi}\hlopt{$>$}\hlstd{\ \ \ }\hlopt{}\hlstd{MapMati}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MapMati}\hlstd{\ \ \ \ }\hlstd{}\hlkwd{B}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMati}\hlopt{$>$(}\hlstd{BB}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MapMati}\hlstd{\ \ \ \ }\hlstd{}\hlkwd{C}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMati}\hlopt{$>$(}\hlstd{CC}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"B\ \%{*}\%\ C"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ \ \ \ \ \ }\hlopt{=\ }\hlstd{B\ }\hlopt{{*}\ }\hlstd{C}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"crossprod(B,\ C)"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{B}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{C}\hlopt{);}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{prodCpp}: Product and cross-product of two matrices. \label{prod}} \end{figure} % \begin{CodeInput} R> fprod <- cxxfunction(signature(BB = "matrix", CC = "matrix"), + prodCpp, "RcppEigen") R> B <- matrix(1:4, ncol = 2) R> C <- matrix(6:1, nrow = 2) R> str(fp <- fprod(B, C)) \end{CodeInput} \begin{CodeOutput} List of 2 $ B %*% C : int [1:2, 1:3] 21 32 13 20 5 8 $ crossprod(B, C): int [1:2, 1:3] 16 38 10 24 4 10 \end{CodeOutput} \begin{CodeInput} R> stopifnot(all.equal(fp[[1]], B %*% C), all.equal(fp[[2]], crossprod(B, C))) \end{CodeInput} % Note that the \code{create} class method for \code{Rcpp::List} implicitly applies \code{Rcpp::wrap} to its arguments. \subsection{Crossproduct of a single matrix} \label{sec:crossproduct} As shown in the last example, the \proglang{R} function \code{crossprod} calculates the product of the transpose of its first argument with its second argument. The single argument form, \code{crossprod(X)}, evaluates $\bm X^\top\bm X$. One could, of course, calculate this product as \begin{verbatim} t(X) %*% X \end{verbatim} but \code{crossprod(X)} is roughly twice as fast because the result is known to be symmetric and only one triangle needs to be calculated. The function \code{tcrossprod} evaluates \code{crossprod(t(X))} without actually forming the transpose. To express these calculations in \pkg{Eigen}, a \code{SelfAdjointView}---which is a dense matrix of which only one triangle is used, the other triangle being inferred from the symmetry---is created. (The characterization ``self-adjoint'' is equivalent to symmetric for non-complex matrices.) The \pkg{Eigen} class name is \code{SelfAdjointView}. The method for general matrices that produces such a view is called \code{selfadjointView}. Both require specification of either the \code{Lower} or \code{Upper} triangle. For triangular matrices the class is \code{TriangularView} and the method is \code{triangularView}. The triangle can be specified as \code{Lower}, \code{UnitLower}, \code{StrictlyLower}, \code{Upper}, \code{UnitUpper} or \code{StrictlyUpper}. 
For self-adjoint views the \code{rankUpdate} method adds a scalar multiple of $\bm A\bm A^\top$ to the current symmetric matrix. The scalar multiple defaults to 1. The code in Figure~\ref{crossprod} produces both $\bm A^\top\bm A$ and $\bm A\bm A^\top$ from a matrix $\bm A$. \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{Map}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXi}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{Lower}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwb{const\ }\hlstd{Map}\hlopt{$<$}\hlstd{MatrixXi}\hlopt{$>$\ }\hlstd{}\hlkwd{A}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{Map}\hlopt{$<$}\hlstd{MatrixXi}\hlopt{$>$\ $>$(}\hlstd{AA}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{m}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{rows}\hlstd{}\hlopt{()),\ }\hlstd{}\hlkwd{n}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{cols}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{MatrixXi}\hlstd{\ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{MatrixXi}\hlstd{}\hlopt{(}\hlstd{n}\hlopt{,\ }\hlstd{n}\hlopt{).}\hlstd{}\hlkwd{setZero}\hlstd{}\hlopt{().}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{selfadjointView}\hlopt{$<$}\hlstd{Lower}\hlopt{$>$().}\hlstd{}\hlkwd{rankUpdate}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()));}\hspace*{\fill}\\ \hlstd{MatrixXi}\hlstd{\ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{AAt}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{MatrixXi}\hlstd{}\hlopt{(}\hlstd{m}\hlopt{,\ }\hlstd{m}\hlopt{).}\hlstd{}\hlkwd{setZero}\hlstd{}\hlopt{().}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{selfadjointView}\hlopt{$<$}\hlstd{Lower}\hlopt{$>$().}\hlstd{}\hlkwd{rankUpdate}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"crossprod(A)"}\hlstd{}\hlopt{{)}}\hlstd{\ \ }\hlopt{=\ }\hlstd{AtA}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"tcrossprod(A)"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{AAt}\hlopt{);}\hlstd{}\hspace*{\fill} \normalfont \normalsize \hrule \caption{\code{crossprodCpp}: Cross-product and transposed cross-product of a single matrix. \label{crossprod}} \end{figure} \begin{CodeInput} R> fcprd <- cxxfunction(signature(AA = "matrix"), crossprodCpp, "RcppEigen") R> str(crp <- fcprd(A)) \end{CodeInput} \begin{CodeOutput} List of 2 $ crossprod(A) : int [1:2, 1:2] 14 32 32 77 $ tcrossprod(A): int [1:3, 1:3] 17 22 27 22 29 36 27 36 45 \end{CodeOutput} \begin{CodeInput} R> stopifnot(all.equal(crp[[1]], crossprod(A)), + all.equal(crp[[2]], tcrossprod(A))) \end{CodeInput} To some, the expressions in Figure~\ref{crossprod} to construct \code{AtA} and \code{AAt} are compact and elegant. To others they are hopelessly confusing. If you find yourself in the latter group, you just need to read the expression left to right. 
So, for example, we construct \code{AAt} by creating a general integer matrix of size $m\times m$ (where $\bm A$ is $m\times n$), ensuring that all its elements are zero, regarding it as a self-adjoint (i.e., symmetric) matrix using the elements in the lower triangle, then adding $\bm A\bm A^\top$ to it and converting back to a general matrix form (i.e., the strict lower triangle is copied into the strict upper triangle). In more detail: \begin{enumerate} \item \code{MatrixXi(n, n)} creates an $n\times n$ integer matrix with arbitrary contents \item \code{.setZero()} zeros out the contents of the matrix \item \code{.selfAdjointView()} causes what follows to treat the matrix as a symmetric matrix in which only the lower triangle is used, the strict upper triangle being inferred by symmetry \item \code{.rankUpdate(A)} forms the sum $\bm B+\bm A\bm A^\top$ where $\bm B$ is the symmetric matrix of zeros created in the previous steps. \end{enumerate} The assignment of this symmetric matrix to the (general) \code{MatrixXi} object \code{AAt} causes the result to be symmetrized during the assignment. For these products one could define the symmetric matrix from either the lower triangle or the upper triangle as the result will be symmetrized before it is returned. To cut down on repetition of \code{using} statements we gather them in a character variable, \code{incl}, that will be given as the \code{includes} argument in the calls to \code{cxxfunction}. We also define a utility function, \code{AtA}, that returns the crossproduct matrix as shown in Figure~\ref{fig:incl} \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{LLT}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{Lower}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{Map}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXd}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXi}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{Upper}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{VectorXd}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{Map}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$}\hlstd{\ \ }\hlopt{}\hlstd{MapMatd}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{Map}\hlopt{$<$}\hlstd{MatrixXi}\hlopt{$>$}\hlstd{\ \ }\hlopt{}\hlstd{MapMati}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{Map}\hlopt{$<$}\hlstd{VectorXd}\hlopt{$>$}\hlstd{\ \ }\hlopt{}\hlstd{MapVecd}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{inline\ }\hlstd{MatrixXd\ }\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{}\hlkwb{const\ }\hlstd{MapMatd}\hlopt{\&\ }\hlstd{A}\hlopt{)\ \{}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{}\hlkwb{int}\hlstd{\ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{n}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{cols}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{}\hlkwa{return}\hlstd{\ \ \ }\hlkwa{}\hlstd{}\hlkwd{MatrixXd}\hlstd{}\hlopt{(}\hlstd{n}\hlopt{,}\hlstd{n}\hlopt{).}\hlstd{}\hlkwd{setZero}\hlstd{}\hlopt{().}\hlstd{selfadjointView}\hlopt{$<$}\hlstd{Lower}\hlopt{$>$()}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ 
}\hlstd{}\hlopt{.}\hlstd{}\hlkwd{rankUpdate}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlopt{\}}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{The contents of the character vector, \code{incl}, that will preface \proglang{C++} code segments that follow. \label{fig:incl}} \end{figure} \subsection{Cholesky decomposition of the crossprod} \label{sec:chol} The Cholesky decomposition of the positive-definite, symmetric matrix, $\bm A$, can be written in several forms. Numerical analysts define the ``LLt'' form as the lower triangular matrix, $\bm L$, such that $\bm A=\bm L\bm L^\top$ and the ``LDLt'' form as a unit lower triangular matrix $\bm L$ and a diagonal matrix $\bm D$ with positive diagonal elements such that $\bm A=\bm L\bm D\bm L^\top$. Statisticians often write the decomposition as $\bm A=\bm R^\top\bm R$ where $\bm R$ is an upper triangular matrix. Of course, this $\bm R$ is simply the transpose of $\bm L$ from the ``LLt'' form. The templated \pkg{Eigen} classes for the LLt and LDLt forms are called \code{LLT} and \code{LDLT} and the corresponding methods are \code{.llt()} and \code{.ldlt()}. Because the Cholesky decomposition involves taking square roots, we pass a numeric matrix, $\bm A$, not an integer matrix. % \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwb{const}\hlstd{\ \ }\hlkwb{}\hlstd{LLT}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$\ }\hlstd{}\hlkwd{llt}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMatd}\hlopt{$>$(}\hlstd{AA}\hlopt{)));}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"L"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{}\hlkwd{MatrixXd}\hlstd{}\hlopt{(}\hlstd{llt}\hlopt{.}\hlstd{}\hlkwd{matrixL}\hlstd{}\hlopt{()),}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"R"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{}\hlkwd{MatrixXd}\hlstd{}\hlopt{(}\hlstd{llt}\hlopt{.}\hlstd{}\hlkwd{matrixU}\hlstd{}\hlopt{()));}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{cholCpp}: Cholesky decomposition of a cross-product. \label{chol}} \end{figure} % \begin{CodeInput} R> storage.mode(A) <- "double" R> fchol <- cxxfunction(signature(AA = "matrix"), cholCpp, "RcppEigen", incl) R> (ll <- fchol(A)) \end{CodeInput} \begin{CodeOutput} $L [,1] [,2] [1,] 3.74166 0.00000 [2,] 8.55236 1.96396 $R [,1] [,2] [1,] 3.74166 8.55236 [2,] 0.00000 1.96396 \end{CodeOutput} \begin{CodeInput} R> stopifnot(all.equal(ll[[2]], chol(crossprod(A)))) \end{CodeInput} \subsection{Determinant of the cross-product matrix} \label{sec:determinant} The ``D-optimal'' criterion for experimental design chooses the design that maximizes the determinant, $|\bm X^\top\bm X|$, for the $n\times p$ model matrix (or Jacobian matrix), $\bm X$. The determinant, $|\bm L|$, of the $p\times p$ lower Cholesky factor $\bm L$, defined so that $\bm L\bm L^\top=\bm X^\top\bm X$, is the product of its diagonal elements, as is the case for any triangular matrix. By the properties of determinants, \begin{displaymath} |\bm X^\top\bm X|=|\bm L\bm L^\top|=|\bm L|\,|\bm L^\top|=|\bm L|^2 \end{displaymath} Alternatively, if using the ``LDLt'' decomposition, $\bm L\bm D\bm L^\top=\bm X^\top\bm X$ where $\bm L$ is unit lower triangular and $\bm D$ is diagonal then $|\bm X^\top\bm X|$ is the product of the diagonal elements of $\bm D$. 
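For the small matrix $\bm A$ used above these relationships are easy to verify by hand from the output shown earlier:
\begin{displaymath}
|\bm A^\top\bm A| = 14\cdot 77 - 32^2 = 54 = \left(3.74166\times 1.96396\right)^2 ,
\end{displaymath}
the last expression being the squared product of the diagonal elements of the factor $\bm L$ computed in the previous section.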
Because it is known that the diagonal elements of $\bm D$ must be non-negative, one often evaluates the logarithm of the determinant as the sum of the logarithms of the diagonal elements of $\bm D$. Several options are shown in Figure~\ref{cholDet}. % \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwb{const\ }\hlstd{MatrixXd}\hlstd{\ \ }\hlstd{}\hlkwd{ata}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMatd}\hlopt{$>$(}\hlstd{AA}\hlopt{)));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ double}\hlstd{\ \ \ }\hlkwb{}\hlstd{}\hlkwd{detL}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{MatrixXd}\hlstd{}\hlopt{(}\hlstd{ata}\hlopt{.}\hlstd{}\hlkwd{llt}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{matrixL}\hlstd{}\hlopt{()).}\hlstd{}\hlkwd{diagonal}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{prod}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd\ }\hlkwd{Dvec}\hlstd{}\hlopt{(}\hlstd{ata}\hlopt{.}\hlstd{}\hlkwd{ldlt}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{vectorD}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"d1"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{detL\ }\hlopt{{*}\ }\hlstd{detL}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"d2"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{Dvec}\hlopt{.}\hlstd{}\hlkwd{prod}\hlstd{}\hlopt{(),}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"ld"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{Dvec}\hlopt{.}\hlstd{}\hlkwd{array}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{log}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{sum}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{cholDetCpp}: Determinant of a cross-product using the ``LLt'' and ``LDLt'' forms of the Cholesky decomposition.} \label{cholDet} \end{figure} % \begin{CodeInput} R> fdet <- cxxfunction(signature(AA = "matrix"), cholDetCpp, "RcppEigen", + incl) R> unlist(ll <- fdet(A)) \end{CodeInput} \begin{CodeOutput} d1 d2 ld 54.00000 54.00000 3.98898 \end{CodeOutput} % Note the use of the \code{array()} method in the calculation of the log-determinant. Because the \code{log()} method applies to arrays, not to vectors or matrices, an array must be created from \code{Dvec} before applying the \code{log()} method. \section{Least squares solutions} \label{sec:leastSquares} A common operation in statistical computing is calculating a least squares solution, $\widehat{\bm\beta}$, defined as \begin{displaymath} \widehat{\bm\beta}=\argmin_{\bm \beta}\|\bm y-\bm X\bm\beta\|^2 \end{displaymath} where the model matrix, $\bm X$, is $n\times p$ ($n\ge p$) and $\bm y$ is an $n$-dimensional response vector. There are several ways, based on matrix decompositions, to determine such a solution. Earlier, two forms of the Cholesky decomposition were discussed: ``LLt'' and ``LDLt'', which can both be used to solve for $\widehat{\bm\beta}$. Other decompositions that can be used are the QR decomposition, with or without column pivoting, the singular value decomposition and the eigendecomposition of a symmetric matrix. Determining a least squares solution is relatively straightforward. However, statistical computing often requires additional information, such as the standard errors of the coefficient estimates. 
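To fix notation for the code that follows (this is the standard least squares result, not anything specific to \pkg{Eigen}), the residual standard error and the standard errors of the individual coefficients are
\begin{displaymath}
s=\sqrt{\frac{\|\bm y-\bm X\widehat{\bm\beta}\|^2}{n-p}},\qquad
\mathrm{se}\!\left(\widehat{\beta}_j\right)=s\,\sqrt{\left[\left(\bm X^\top\bm X\right)^{-1}\right]_{jj}} .
\end{displaymath}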
Calculating these involves evaluating the diagonal elements of $\left(\bm X^\top\bm X\right)^{-1}$ and the residual sum of squares, $\|\bm y-\bm X\widehat{\bm\beta}\|^2$. \subsection{Least squares using the ``LLt'' Cholesky} \label{sec:LLtLeastSquares} \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwb{const\ }\hlstd{MapMatd}\hlstd{\ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{X}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMatd}\hlopt{$>$(}\hlstd{XX}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MapVecd}\hlstd{\ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{y}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapVecd}\hlopt{$>$(}\hlstd{yy}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{n}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{rows}\hlstd{}\hlopt{()),\ }\hlstd{}\hlkwd{p}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{cols}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{LLT}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$\ }\hlstd{}\hlkwd{llt}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ }\hlstd{}\hlkwd{betahat}\hlstd{}\hlopt{(}\hlstd{llt}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{y}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ }\hlstd{}\hlkwd{fitted}\hlstd{}\hlopt{(}\hlstd{X\ }\hlopt{{*}\ }\hlstd{betahat}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ \ }\hlstd{}\hlkwd{resid}\hlstd{}\hlopt{(}\hlstd{y\ }\hlopt{{-}\ }\hlstd{fitted}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{df}\hlstd{}\hlopt{(}\hlstd{n\ }\hlopt{{-}\ }\hlstd{p}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ double}\hlstd{\ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{s}\hlstd{}\hlopt{(}\hlstd{resid}\hlopt{.}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{()\ /\ }\hlstd{std}\hlopt{::}\hlstd{}\hlkwd{sqrt}\hlstd{}\hlopt{(}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{(}\hlstd{df}\hlopt{)));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ \ \ \ \ }\hlstd{}\hlkwd{se}\hlstd{}\hlopt{(}\hlstd{s\ }\hlopt{{*}\ }\hlstd{llt}\hlopt{.}\hlstd{}\hlkwd{matrixL}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{MatrixXd}\hlopt{::}\hlstd{}\hlkwd{Identity}\hlstd{}\hlopt{(}\hlstd{p}\hlopt{,\ }\hlstd{p}\hlopt{))}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{.}\hlstd{}\hlkwd{colwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwa{return}\hlstd{\ \ \ \ \ }\hlkwa{}\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"coefficients"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ }\hlopt{=\ }\hlstd{betahat}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"fitted.values"}\hlstd{}\hlopt{{)}}\hlstd{\ \ }\hlopt{=\ }\hlstd{fitted}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"residuals"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ \ \ }\hlopt{=\ }\hlstd{resid}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"s"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlopt{=\ 
}\hlstd{s}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"df.residual"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ }\hlopt{=\ }\hlstd{df}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"rank"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ \ \ \ \ \ \ \ }\hlopt{=\ }\hlstd{p}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"Std.\ Error"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ \ }\hlopt{=\ }\hlstd{se}\hlopt{);}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{lltLSCpp}: Least squares using the Cholesky decomposition. \label{lltLS}} \end{figure} Figure~\ref{lltLS} shows a calculation of the least squares coefficient estimates (\code{betahat}) and the standard errors (\code{se}) through an ``LLt'' Cholesky decomposition of the crossproduct of the model matrix, $\bm X$. Next, the results from this calculation are compared to those from the \code{lm.fit} function in \proglang{R} (\code{lm.fit} is the workhorse function called by \code{lm} once the model matrix and response have been evaluated). \begin{CodeInput} R> lltLS <- cxxfunction(signature(XX = "matrix", yy = "numeric"), lltLSCpp, + "RcppEigen", incl) R> data("trees", package = "datasets") R> str(lltFit <- with(trees, lltLS(cbind(1, log(Girth)), log(Volume)))) \end{CodeInput} \begin{CodeOutput} List of 7 $ coefficients : num [1:2] -2.35 2.2 $ fitted.values: num [1:31] 2.3 2.38 2.43 2.82 2.86 ... $ residuals : num [1:31] 0.0298 -0.0483 -0.1087 -0.0223 0.0727 ... $ s : num 0.115 $ df.residual : int 29 $ rank : int 2 $ Std. Error : num [1:2] 0.2307 0.0898 R> str(lmFit <- with(trees, lm.fit(cbind(1, log(Girth)), log(Volume)))) List of 8 $ coefficients : Named num [1:2] -2.35 2.2 ..- attr(*, "names")= chr [1:2] "x1" "x2" $ residuals : num [1:31] 0.0298 -0.0483 -0.1087 -0.0223 0.0727 ... $ effects : Named num [1:31] -18.2218 2.8152 -0.1029 -0.0223 0.0721 ... ..- attr(*, "names")= chr [1:31] "x1" "x2" "" "" ... $ rank : int 2 $ fitted.values: num [1:31] 2.3 2.38 2.43 2.82 2.86 ... $ assign : NULL $ qr :List of 5 ..$ qr : num [1:31, 1:2] -5.57 0.18 0.18 0.18 0.18 ... ..$ qraux: num [1:2] 1.18 1.26 ..$ pivot: int [1:2] 1 2 ..$ tol : num 1e-07 ..$ rank : int 2 ..- attr(*, "class")= chr "qr" $ df.residual : int 29 \end{CodeOutput} \begin{CodeInput} R> for(nm in c("coefficients", "residuals", "fitted.values", "rank")) + stopifnot(all.equal(lltFit[[nm]], unname(lmFit[[nm]]))) R> stopifnot(all.equal(lltFit[["Std. Error"]], + unname(coef(summary(lm(log(Volume) ~ log(Girth), trees)))[,2]))) \end{CodeInput} There are several aspects of the \proglang{C++} code in Figure~\ref{lltLS} worth mentioning. The \code{solve} method for the \code{LLT} object evaluates, in this case, $\left(\bm X^\top\bm X\right)^{-1}\bm X^\top\bm y$ but without actually evaluating the inverse. The calculation of the residuals, $\bm y-\widehat{\bm y}$, can be written, as in \proglang{R}, as \code{y - fitted}. (But note that \pkg{Eigen} classes do not have a ``recycling rule'' as in \proglang{R}. That is, the two vector operands must have the same length.) The \code{norm()} method evaluates the square root of the sum of squares of the elements of a vector. Although one does not explicitly evaluate $\left(\bm X^\top\bm X\right)^{-1}$ one does evaluate $\bm L^{-1}$ to obtain the standard errors. 
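The reason this works is that, writing $\bm X^\top\bm X=\bm L\bm L^\top$,
\begin{displaymath}
\left(\bm X^\top\bm X\right)^{-1}=\bm L^{-\top}\bm L^{-1}=\left(\bm L^{-1}\right)^\top\bm L^{-1} ,
\end{displaymath}
so the $j$th diagonal element of $\left(\bm X^\top\bm X\right)^{-1}$ is the squared norm of the $j$th column of $\bm L^{-1}$; the \code{.colwise().norm()} expression evaluates exactly these column norms, which are then scaled by $s$.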
Note also the use of the \code{colwise()} method in the evaluation of the standard errors. It applies a method to the columns of a matrix, returning a vector. The \pkg{Eigen} \code{colwise()} and \code{rowwise()} methods are similar in effect to the \code{apply} function in \proglang{R}. In the descriptions of other methods for solving least squares problems, much of the code parallels that shown in Figure~\ref{lltLS}. The redundant parts are omitted, and only the evaluation of the coefficients, the rank and the standard errors is shown. Actually, the standard errors are calculated only up to the scalar multiple of $s$, the residual standard error, in these code fragments. The calculation of the residuals and $s$ and the scaling of the coefficient standard errors is the same for all methods. (See the files \code{fastLm.h} and \code{fastLm.cpp} in the \pkg{RcppEigen} source package for details.) \subsection{Least squares using the unpivoted QR decomposition} \label{sec:QR} A QR decomposition has the form \begin{displaymath} \bm X=\bm Q\bm R=\bm Q_1\bm R_1 \end{displaymath} where $\bm Q$ is an $n\times n$ orthogonal matrix, which means that $\bm Q^\top\bm Q=\bm Q\bm Q^\top=\bm I_n$, and the $n\times p$ matrix $\bm R$ is zero below the main diagonal. The $n\times p$ matrix $\bm Q_1$ is the first $p$ columns of $\bm Q$ and the $p\times p$ upper triangular matrix $\bm R_1$ is the top $p$ rows of $\bm R$. There are three \pkg{Eigen} classes for the QR decomposition: \code{HouseholderQR} provides the basic QR decomposition using Householder transformations, \code{ColPivHouseholderQR} incorporates column pivots and \code{FullPivHouseholderQR} incorporates both row and column pivots. Figure~\ref{QRLS} shows a least squares solution using the unpivoted QR decomposition. The calculations in Figure~\ref{QRLS} are quite similar to those in Figure~\ref{lltLS}. In fact, if one had extracted the upper triangular factor (the \code{matrixU()} method) from the \code{LLT} object in Figure~\ref{lltLS}, the rest of the code would be nearly identical. \begin{figure}[t!] 
\hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{HouseholderQR}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwb{const\ }\hlstd{HouseholderQR}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$\ }\hlstd{}\hlkwd{QR}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd\ }\hlkwd{betahat}\hlstd{}\hlopt{(}\hlstd{QR}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{y}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ }\hlstd{}\hlkwd{fitted}\hlstd{}\hlopt{(}\hlstd{X\ }\hlopt{{*}\ }\hlstd{betahat}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{df}\hlstd{}\hlopt{(}\hlstd{n\ }\hlopt{{-}\ }\hlstd{p}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlkwd{se}\hlstd{}\hlopt{(}\hlstd{QR}\hlopt{.}\hlstd{}\hlkwd{matrixQR}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{topRows}\hlstd{}\hlopt{(}\hlstd{p}\hlopt{).}\hlstd{triangularView}\hlopt{$<$}\hlstd{Upper}\hlopt{$>$()}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{MatrixXd}\hlopt{::}\hlstd{}\hlkwd{Identity}\hlstd{}\hlopt{(}\hlstd{p}\hlopt{,}\hlstd{p}\hlopt{)).}\hlstd{}\hlkwd{rowwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\
\mbox{} \normalfont \normalsize \hrule \caption{\code{QRLSCpp}: Least squares using the unpivoted QR decomposition. \label{QRLS}} \end{figure}
\subsection{Handling the rank-deficient case} \label{sec:rankdeficient}
One important consideration when determining least squares solutions is whether $\rank(\bm X)$ is $p$, a situation described by saying that $\bm X$ has ``full column rank''. When $\bm X$ does not have full column rank it is said to be ``rank deficient''. Although the theoretical rank of a matrix is well-defined, its evaluation in practice is not. At best one can compute an effective rank according to some tolerance. Decompositions that allow estimation of the rank of the matrix in this way are said to be ``rank-revealing''.
Because the \code{model.matrix} function in \proglang{R} does a considerable amount of symbolic analysis behind the scenes, one usually ends up with full-rank model matrices. The common cases of rank deficiency, such as incorporating both a constant term and a full set of indicator columns for the levels of a factor, are eliminated. Other, more subtle, situations will not be detected at this stage, however. A simple example occurs when there is a ``missing cell'' in a two-way layout and the interaction of the two factors is included in the model.
\begin{CodeInput}
R> dd <- data.frame(f1 = gl(4, 6, labels = LETTERS[1:4]), + f2 = gl(3, 2, labels = letters[1:3]))[-(7:8), ] R> xtabs(~ f2 + f1, dd)
\end{CodeInput}
\begin{CodeOutput}
f1 f2 A B C D a 2 0 2 2 b 2 2 2 2 c 2 2 2 2
\end{CodeOutput}
\begin{CodeInput}
R> mm <- model.matrix(~ f1 * f2, dd) R> kappa(mm)
\end{CodeInput}
\begin{CodeOutput}
[1] 4.30923e+16
\end{CodeOutput}
This yields a large condition number, indicating rank deficiency. Alternatively, the reciprocal condition number can be evaluated.
\begin{CodeInput} R> rcond(mm) \end{CodeInput} \begin{CodeOutput} [1] 2.3206e-17 \end{CodeOutput} \begin{CodeInput} R> (c(rank = qr(mm)$rank, p = ncol(mm))) \end{CodeInput} \begin{CodeOutput} rank p 11 12 \end{CodeOutput} \begin{CodeInput} R> set.seed(1) R> dd$y <- mm %*% seq_len(ncol(mm)) + rnorm(nrow(mm), sd = 0.1) R> fm1 <- lm(y ~ f1 * f2, dd) R> writeLines(capture.output(print(summary(fm1), + signif.stars = FALSE))[9:22]) \end{CodeInput} \begin{CodeOutput} Coefficients: (1 not defined because of singularities) Estimate Std. Error t value Pr(>|t|) (Intercept) 0.9779 0.0582 16.8 3.4e-09 f1B 12.0381 0.0823 146.3 < 2e-16 f1C 3.1172 0.0823 37.9 5.2e-13 f1D 4.0685 0.0823 49.5 2.8e-14 f2b 5.0601 0.0823 61.5 2.6e-15 f2c 5.9976 0.0823 72.9 4.0e-16 f1B:f2b -3.0148 0.1163 -25.9 3.3e-11 f1C:f2b 7.7030 0.1163 66.2 1.2e-15 f1D:f2b 8.9643 0.1163 77.1 < 2e-16 f1B:f2c NA NA NA NA f1C:f2c 10.9613 0.1163 94.2 < 2e-16 f1D:f2c 12.0411 0.1163 103.5 < 2e-16 \end{CodeOutput} The \code{lm} function for fitting linear models in \proglang{R} uses a rank-revealing form of the QR decomposition. When the model matrix is determined to be rank deficient, according to the threshold used in \proglang{R}'s \code{qr} function, the model matrix is reduced to $\rank{(\bm X)}$ columns by pivoting selected columns (those that are apparently linearly dependent on columns to their left) to the right hand side of the matrix. A solution for this reduced model matrix is determined and the coefficients and standard errors for the redundant columns are flagged as missing. An alternative approach is to evaluate the ``pseudo-inverse'' of $\bm X$ from the singular value decomposition (SVD) of $\bm X$ or the eigendecomposition of $\bm X^\top\bm X$. The SVD is of the form \begin{displaymath} \bm X=\bm U\bm D\bm V^\top=\bm U_1\bm D_1\bm V^\top \end{displaymath} where $\bm U$ is an orthogonal $n\times n$ matrix and $\bm U_1$ is its leftmost $p$ columns, $\bm D$ is $n\times p$ and zero off the main diagonal so that $\bm D_1$ is a $p\times p$ diagonal matrix with non-increasing, non-negative diagonal elements, and $\bm V$ is a $p\times p$ orthogonal matrix. The pseudo-inverse of $\bm D_1$, written $\bm D_1^+$ is a $p\times p$ diagonal matrix whose first $r=\rank(\bm X)$ diagonal elements are the inverses of the corresponding diagonal elements of $\bm D_1$ and whose last $p-r$ diagonal elements are zero. The tolerance for determining if an element of the diagonal of $\bm D_1$ is considered to be (effectively) zero is a multiple of the largest singular value (i.e., the $(1,1)$ element of $\bm D$). The pseudo-inverse, $\bm X^+$, of $\bm X$ is defined as \begin{displaymath} \bm X^+=\bm V\bm D_1^+\bm U_1^\top . \end{displaymath} \begin{figure}[t!] 
\hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwc{inline\ }\hlstd{ArrayXd\ }\hlkwd{Dplus}\hlstd{}\hlopt{(}\hlstd{}\hlkwb{const\ }\hlstd{ArrayXd}\hlopt{\&\ }\hlstd{d}\hlopt{)\ \{}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{ArrayXd}\hlstd{\ \ \ }\hlstd{}\hlkwd{di}\hlstd{}\hlopt{(}\hlstd{d}\hlopt{.}\hlstd{}\hlkwd{size}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{}\hlkwb{double}\hlstd{\ \ }\hlkwb{}\hlstd{}\hlkwd{comp}\hlstd{}\hlopt{(}\hlstd{d}\hlopt{.}\hlstd{}\hlkwd{maxCoeff}\hlstd{}\hlopt{()\ {*}\ }\hlstd{}\hlkwd{threshold}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{}\hlkwa{for\ }\hlstd{}\hlopt{(}\hlstd{}\hlkwb{int\ }\hlstd{j\ }\hlopt{=\ }\hlstd{}\hlnum{0}\hlstd{}\hlopt{;\ }\hlstd{j\ }\hlopt{$<$\ }\hlstd{d}\hlopt{.}\hlstd{}\hlkwd{size}\hlstd{}\hlopt{();\ ++}\hlstd{j}\hlopt{)\ }\hlstd{di}\hlopt{{[}}\hlstd{j}\hlopt{{]}\ =\ (}\hlstd{d}\hlopt{{[}}\hlstd{j}\hlopt{{]}\ $<$\ }\hlstd{comp}\hlopt{)\ }\hlstd{?\ }\hlnum{0}\hlstd{}\hlopt{.\ :\ }\hlstd{}\hlnum{1}\hlstd{}\hlopt{./}\hlstd{d}\hlopt{{[}}\hlstd{j}\hlopt{{]};}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{}\hlkwa{return\ }\hlstd{di}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlopt{\}}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{DplusCpp}: Create the diagonal $\bm d^+$ of the pseudo-inverse, $\bm D_1^+$, from the array of singular values, $\bm d$. \label{Dplus}} \end{figure} In Figure~\ref{Dplus} a utility function, \code{Dplus}, is defined to return the diagonal of the pseudo-inverse, $\bm D_1^+$, as an array, given the singular values (the diagonal of $\bm D_1$) as an array. Calculation of the maximum element of $\bm d$ (the method is called \code{.maxCoeff()}) and the use of a \code{threshold()} function provides greater generality for the function. It can be used on the eigenvalues of $\bm X^\top\bm X$, as shown in Section~\ref{sec:eigendecomp}, even though these are returned in increasing order, as opposed to the decreasing order of the singular values. \subsection{Least squares using the SVD} \label{sec:SVDls} \begin{figure}[b!] 
\hrule \smallskip \noindent \ttfamily \hlkwb{const\ }\hlstd{Eigen}\hlopt{::}\hlstd{JacobiSVD}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{UDV}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{jacobiSvd}\hlstd{}\hlopt{(}\hlstd{Eigen}\hlopt{::}\hlstd{ComputeThinU}\hlopt{\textbar }\hlstd{Eigen}\hlopt{::}\hlstd{ComputeThinV}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{ArrayXd}\hlstd{\ \ \ \ \ \ \ }\hlstd{}\hlkwd{Dp}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{Dplus}\hlstd{}\hlopt{(}\hlstd{UDV}\hlopt{.}\hlstd{}\hlkwd{singularValues}\hlstd{}\hlopt{()));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{r}\hlstd{}\hlopt{((}\hlstd{Dp\ }\hlopt{$>$\ }\hlstd{}\hlnum{0}\hlstd{}\hlopt{).}\hlstd{}\hlkwd{count}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MatrixXd}\hlstd{\ \ \ \ \ }\hlstd{}\hlkwd{VDp}\hlstd{}\hlopt{(}\hlstd{UDV}\hlopt{.}\hlstd{}\hlkwd{matrixV}\hlstd{}\hlopt{()\ {*}\ }\hlstd{Dp}\hlopt{.}\hlstd{}\hlkwd{matrix}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{asDiagonal}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd\ }\hlkwd{betahat}\hlstd{}\hlopt{(}\hlstd{VDp\ }\hlopt{{*}\ }\hlstd{UDV}\hlopt{.}\hlstd{}\hlkwd{matrixU}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{y}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlkwd{se}\hlstd{}\hlopt{(}\hlstd{s\ }\hlopt{{*}\ }\hlstd{VDp}\hlopt{.}\hlstd{}\hlkwd{rowwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{SVDLSCpp}: Least squares using the SVD. \label{SVDLS}} \end{figure} With these definitions the code for least squares using the singular value decomposition can be written as in Figure~\ref{SVDLS}. Even in the rank-deficient case this code will produce a complete set of coefficients and their standard errors. The user must be careful to check if the computed rank is less than $p$, the number of columns in $\bm X$, in which case the estimated coefficients are just one of an infinite number of coefficient vectors that produce the same fitted values. It happens that the solution from this pseudo-inverse is the minimum norm solution (in the sense that $\|\bm\beta\|^2$ is minimized among those $\bm\beta$ that minimize $\|\bm y-\bm X\bm\beta\|^2$). The standard errors of the coefficient estimates in the rank-deficient case must be interpreted carefully. The solution with one or more missing coefficients, as returned by the \code{lm.fit} function in \proglang{R} and by the column-pivoted QR decomposition described in Section~\ref{sec:colPivQR}, does not provide standard errors for the missing coefficients. That is, both the coefficient and its standard error are returned as \code{NA} because the least squares solution is performed on a reduced model matrix. It is also true that the solution returned by the SVD method is with respect to a reduced model matrix but the $p$ coefficient estimates and their $p$ standard errors don't show this. They are, in fact, linear combinations of a set of $r$ coefficient estimates and their standard errors. 
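The expression for the standard errors in Figure~\ref{SVDLS} follows the same reasoning as before: because $\widehat{\bm\beta}=\bm V\bm D_1^+\bm U_1^\top\bm y$ and $\bm U_1^\top\bm U_1=\bm I_p$, the (estimated) covariance matrix of $\widehat{\bm\beta}$ is
\begin{displaymath}
s^2\,\bm V\bm D_1^+\bm U_1^\top\bm U_1\bm D_1^+\bm V^\top=s^2\,\bm V\left(\bm D_1^+\right)^2\bm V^\top ,
\end{displaymath}
whose diagonal elements are $s^2$ times the squared norms of the rows of $\bm V\bm D_1^+$, the quantity computed by \code{VDp.rowwise().norm()}.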
\pagebreak \subsection{Least squares using the eigendecomposition} \label{sec:eigendecomp} The eigendecomposition of $\bm X^\top\bm X$ is defined as \begin{displaymath} \bm X^\top\bm X=\bm V\bm\Lambda\bm V^\top \end{displaymath} where $\bm V$, the matrix of eigenvectors, is a $p\times p$ orthogonal matrix and $\bm\Lambda$ is a $p\times p$ diagonal matrix with non-decreasing, non-negative diagonal elements, called the eigenvalues of $\bm X^\top\bm X$. When the eigenvalues are distinct, this $\bm V$ is the same (up to reordering of the columns) as that in the SVD and the eigenvalues of $\bm X^\top\bm X$ are the (reordered) squares of the singular values of $\bm X$. With these definitions one can adapt much of the code from the SVD method for the eigendecomposition, as shown in Figure~\ref{SymmEigLS}. \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlkwb{const\ }\hlstd{Eigen}\hlopt{::}\hlstd{SelfAdjointEigenSolver}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$\ }\hlstd{}\hlkwd{VLV}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{ArrayXd}\hlstd{\ \ \ \ \ \ \ }\hlstd{}\hlkwd{Dp}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{Dplus}\hlstd{}\hlopt{(}\hlstd{eig}\hlopt{.}\hlstd{}\hlkwd{eigenvalues}\hlstd{}\hlopt{()).}\hlstd{}\hlkwd{sqrt}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{r}\hlstd{}\hlopt{((}\hlstd{Dp\ }\hlopt{$>$\ }\hlstd{}\hlnum{0}\hlstd{}\hlopt{).}\hlstd{}\hlkwd{count}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MatrixXd}\hlstd{\ \ \ \ \ }\hlstd{}\hlkwd{VDp}\hlstd{}\hlopt{(}\hlstd{VLV}\hlopt{.}\hlstd{}\hlkwd{eigenvectors}\hlstd{}\hlopt{()\ {*}\ }\hlstd{Dp}\hlopt{.}\hlstd{}\hlkwd{matrix}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{asDiagonal}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd\ }\hlkwd{betahat}\hlstd{}\hlopt{(}\hlstd{VDp\ }\hlopt{{*}\ }\hlstd{VDp}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{y}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlkwd{se}\hlstd{}\hlopt{(}\hlstd{s\ }\hlopt{{*}\ }\hlstd{VDp}\hlopt{.}\hlstd{}\hlkwd{rowwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{SymmEigLSCpp}: Least squares using the eigendecomposition. \label{SymmEigLS}} \end{figure} \subsection{Least squares using the column-pivoted QR decomposition} \label{sec:colPivQR} The column-pivoted QR decomposition provides results similar to those from \proglang{R} in both the full-rank and the rank-deficient cases. The decomposition is of the form \begin{displaymath} \bm X\bm P=\bm Q\bm R=\bm Q_1\bm R_1 \end{displaymath} where, as before, $\bm Q$ is $n\times n$ and orthogonal and $\bm R$ is $n\times p$ and upper triangular. The $p\times p$ matrix $\bm P$ is a permutation matrix. That is, its columns are a permutation of the columns of $\bm I_p$. It serves to reorder the columns of $\bm X$ so that the diagonal elements of $\bm R$ are non-increasing in magnitude. An instance of the class \code{Eigen::ColPivHouseholderQR} has a \code{rank()} method returning the computational rank of the matrix. When $\bm X$ is of full rank one can use essentially the same code as in the unpivoted decomposition except that one must reorder the standard errors. 
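The need to reorder follows directly from the permutation: $\bm X\bm P=\bm Q\bm R$ implies $\bm X^\top\bm X=\bm P\bm R_1^\top\bm R_1\bm P^\top$, so that
\begin{displaymath}
\left(\bm X^\top\bm X\right)^{-1}=\bm P\,\bm R_1^{-1}\bm R_1^{-\top}\bm P^\top ,
\end{displaymath}
and the standard errors are the row norms of $\bm R_1^{-1}$, permuted back to the original column order by $\bm P$ (the multiplication by \code{Pmat} in the full-rank branch of Figure~\ref{ColPivQRLS}).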
When $\bm X$ is rank-deficient, the coefficients and standard errors are evaluated for the leading $r$ columns of $\bm X\bm P$ only. In the rank-deficient case the straightforward calculation of the fitted values, as $\bm X\widehat{\bm\beta}$, cannot be used because some of the estimated coefficients, $\widehat{\bm\beta}$, are \code{NA} so the product will be a vector of \code{NA}'s. One could do some complicated rearrangement of the columns of X and the coefficient estimates but it is conceptually (and computationally) easier to employ the relationship \begin{displaymath} \widehat{\bm y} = \bm Q_1\bm Q_1^\top\bm y=\bm Q \begin{bmatrix} \bm I_r & \bm 0\\ \bm 0 & \bm 0 \end{bmatrix} \bm Q^\top\bm y \end{displaymath} The vector $\bm Q^\top\bm y$ is called the ``effects'' vector in \proglang{R}. \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{ColPivHouseholderQR}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$}\hlstd{\ \ }\hlopt{}\hlstd{CPivQR}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{CPivQR}\hlopt{::}\hlstd{PermutationType}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Permutation}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwb{const\ }\hlstd{CPivQR}\hlstd{\ \ \ \ \ \ \ }\hlstd{}\hlkwd{PQR}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{Permutation\ }\hlkwd{Pmat}\hlstd{}\hlopt{(}\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{colsPermutation}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{r}\hlstd{}\hlopt{(}\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{rank}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{VectorXd}\hlstd{\ \ \ \ \ \ \ }\hlstd{betahat}\hlopt{,\ }\hlstd{fitted}\hlopt{,\ }\hlstd{se}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{if\ }\hlstd{}\hlopt{(}\hlstd{r\ }\hlopt{==\ }\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{cols}\hlstd{}\hlopt{())\ \{\ }\hlstd{}\hlslc{//\ full\ rank\ case}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{betahat\ }\hlopt{=\ }\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{y}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{fitted}\hlstd{\ \ }\hlstd{}\hlopt{=\ }\hlstd{X\ }\hlopt{{*}\ }\hlstd{betahat}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{se}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlopt{=\ }\hlstd{Pmat\ }\hlopt{{*}\ }\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{matrixQR}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{topRows}\hlstd{}\hlopt{(}\hlstd{p}\hlopt{).}\hlstd{triangularView}\hlopt{$<$}\hlstd{Upper}\hlopt{$>$()}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{MatrixXd}\hlopt{::}\hlstd{}\hlkwd{Identity}\hlstd{}\hlopt{(}\hlstd{p}\hlopt{,\ }\hlstd{p}\hlopt{)).}\hlstd{}\hlkwd{rowwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{();}\hspace*{\fill}\\ \hlstd{}\hlopt{\}\ }\hlstd{}\hlkwa{else\ }\hlstd{}\hlopt{\{}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{MatrixXd}\hlstd{\ \ \ \ }\hlstd{}\hlkwd{Rinv}\hlstd{}\hlopt{(}\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{matrixQR}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{topLeftCorner}\hlstd{}\hlopt{(}\hlstd{r}\hlopt{,\ }\hlstd{r}\hlopt{)}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{.}\hlstd{triangularView}\hlopt{$<$}\hlstd{Upper}\hlopt{$>$().}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{MatrixXd}\hlopt{::}\hlstd{}\hlkwd{Identity}\hlstd{}\hlopt{(}\hlstd{r}\hlopt{,\ }\hlstd{r}\hlopt{)));}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{VectorXd\ 
}\hlkwd{effects}\hlstd{}\hlopt{(}\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{householderQ}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{y}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{betahat}\hlopt{.}\hlstd{}\hlkwd{fill}\hlstd{}\hlopt{(::}\hlstd{NA\textunderscore REAL}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{betahat}\hlopt{.}\hlstd{}\hlkwd{head}\hlstd{}\hlopt{(}\hlstd{r}\hlopt{)}\hlstd{\ \ }\hlopt{=\ }\hlstd{Rinv\ }\hlopt{{*}\ }\hlstd{effects}\hlopt{.}\hlstd{}\hlkwd{head}\hlstd{}\hlopt{(}\hlstd{r}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{betahat}\hlstd{\ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{=\ }\hlstd{Pmat\ }\hlopt{{*}\ }\hlstd{betahat}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{se}\hlopt{.}\hlstd{}\hlkwd{fill}\hlstd{}\hlopt{(::}\hlstd{NA\textunderscore REAL}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{se}\hlopt{.}\hlstd{}\hlkwd{head}\hlstd{}\hlopt{(}\hlstd{r}\hlopt{)}\hlstd{\ \ \ \ \ \ \ }\hlopt{=\ }\hlstd{Rinv}\hlopt{.}\hlstd{}\hlkwd{rowwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{();}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{se}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{=\ }\hlstd{Pmat\ }\hlopt{{*}\ }\hlstd{se}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlslc{//\ create\ fitted\ values\ from\ effects}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{effects}\hlopt{.}\hlstd{}\hlkwd{tail}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{rows}\hlstd{}\hlopt{()\ {-}\ }\hlstd{r}\hlopt{).}\hlstd{}\hlkwd{setZero}\hlstd{}\hlopt{();}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{fitted}\hlstd{\ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{=\ }\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{householderQ}\hlstd{}\hlopt{()\ {*}\ }\hlstd{effects}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlopt{\}}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{ColPivQRLSCpp}: Least squares using the pivoted QR decomposition. \label{ColPivQRLS}} \end{figure} Just to check that the code in Figure~\ref{ColPivQRLS} does indeed provide the desired answer \begin{CodeInput} R> print(summary(fmPQR <- fastLm(y ~ f1 * f2, dd)), signif.st = FALSE) \end{CodeInput} \begin{CodeOutput} Call: fastLm.formula(formula = y ~ f1 * f2, data = dd) Estimate Std. Error t value Pr(>|t|) (Intercept) 0.977859 0.058165 16.812 3.413e-09 f1B 12.038068 0.082258 146.346 < 2.2e-16 f1C 3.117222 0.082258 37.896 5.221e-13 f1D 4.068523 0.082258 49.461 2.833e-14 f2b 5.060123 0.082258 61.516 2.593e-15 f2c 5.997592 0.082258 72.912 4.015e-16 f1B:f2b -3.014763 0.116330 -25.916 3.266e-11 f1C:f2b 7.702999 0.116330 66.217 1.156e-15 f1D:f2b 8.964251 0.116330 77.059 < 2.2e-16 f1B:f2c NA NA NA NA f1C:f2c 10.961326 0.116330 94.226 < 2.2e-16 f1D:f2c 12.041081 0.116330 103.508 < 2.2e-16 Residual standard error: 0.2868 on 11 degrees of freedom Multiple R-squared: 0.9999, Adjusted R-squared: 0.9999 \end{CodeOutput} \begin{CodeInput} R> all.equal(coef(fm1), coef(fmPQR)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} \begin{CodeInput} R> all.equal(unname(fitted(fm1)), fitted(fmPQR)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} \begin{CodeInput} R> all.equal(unname(residuals(fm1)), residuals(fmPQR)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} The rank-revealing SVD method produces the same fitted values but not the same coefficients. 
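This is to be expected. All least squares solutions produce the same orthogonal projection of $\bm y$ onto the column span of $\bm X$, so any two of them, say $\widehat{\bm\beta}_1$ and $\widehat{\bm\beta}_2$, satisfy
\begin{displaymath}
\bm X\widehat{\bm\beta}_1=\bm X\widehat{\bm\beta}_2=\widehat{\bm y} ,
\end{displaymath}
i.e., they can differ only by a vector in the null space of $\bm X$, which changes the coefficients but not the fitted values or residuals.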
\begin{CodeInput} R> print(summary(fmSVD <- fastLm(y ~ f1 * f2, dd, method = 4)), + signif.st = FALSE) \end{CodeInput} \begin{CodeOutput} Call: fastLm.formula(formula = y ~ f1 * f2, data = dd, method = 4) Estimate Std. Error t value Pr(>|t|) (Intercept) 0.977859 0.058165 16.812 3.413e-09 f1B 7.020458 0.038777 181.049 < 2.2e-16 f1C 3.117222 0.082258 37.896 5.221e-13 f1D 4.068523 0.082258 49.461 2.833e-14 f2b 5.060123 0.082258 61.516 2.593e-15 f2c 5.997592 0.082258 72.912 4.015e-16 f1B:f2b 2.002847 0.061311 32.667 2.638e-12 f1C:f2b 7.702999 0.116330 66.217 1.156e-15 f1D:f2b 8.964251 0.116330 77.059 < 2.2e-16 f1B:f2c 5.017610 0.061311 81.838 < 2.2e-16 f1C:f2c 10.961326 0.116330 94.226 < 2.2e-16 f1D:f2c 12.041081 0.116330 103.508 < 2.2e-16 Residual standard error: 0.2868 on 11 degrees of freedom Multiple R-squared: 0.9999, Adjusted R-squared: 0.9999 \end{CodeOutput} \begin{CodeInput} R> all.equal(coef(fm1), coef(fmSVD)) \end{CodeInput} \begin{CodeOutput} [1] "'is.NA' value mismatch: 0 in current 1 in target" \end{CodeOutput} \begin{CodeInput} R> all.equal(unname(fitted(fm1)), fitted(fmSVD)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} \begin{CodeInput} R> all.equal(unname(residuals(fm1)), residuals(fmSVD)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} The coefficients from the symmetric eigendecomposition method are the same as those from the SVD, hence the fitted values and residuals must be the same for these two methods. \begin{CodeInput} R> summary(fmVLV <- fastLm(y ~ f1 * f2, dd, method = 5)) R> all.equal(coef(fmSVD), coef(fmVLV)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} \subsection{Comparative speed} In the \pkg{RcppEigen} package the \proglang{R} function to fit linear models using the methods described above is called \code{fastLm}. It follows an earlier example in the \pkg{Rcpp} package which was carried over to both \pkg{RcppArmadillo} and \pkg{RcppGSL}. The natural question to ask is, ``Is it indeed fast to use these methods based on \pkg{Eigen}?''. To this end, the package provides benchmarking code for these methods, \proglang{R}'s \code{lm.fit} function and the \code{fastLm} implementations in the \pkg{RcppArmadillo} \citep{CRAN:RcppArmadillo} and \pkg{RcppGSL} \citep{CRAN:RcppGSL} packages, if they are installed. The benchmark code, which uses the \pkg{rbenchmark} \citep{CRAN:rbenchmark} package, is in a file named \code{lmBenchmark.R} in the \code{examples} subdirectory of the installed \pkg{RcppEigen} package. It can be run as \begin{CodeInput} R> source(system.file("examples", "lmBenchmark.R", package = "RcppEigen")) \end{CodeInput} Results will vary according to the speed of the processor and the implementation of the BLAS (Basic Linear Algebra Subroutines) used. (\pkg{Eigen} methods do not use the BLAS but the other methods do.) The \pkg{Eigen}3 template library does not use multi-threaded code for these operations but does use the graphics pipeline instructions (SSE and SSE2, in this case) in some calculations. \begin{table}[t!] 
\centering \begin{tabular}{r r r r r} \hline Method & Relative& Elapsed & User & Sys \\ \hline LDLt & 1.00 & 1.18 & 1.17 & 0.00 \\ LLt & 1.01 & 1.19 & 1.17 & 0.00 \\ SymmEig & 2.76 & 3.25 & 2.70 & 0.52 \\ QR & 6.35 & 7.47 & 6.93 & 0.53 \\ arma & 6.60 & 7.76 & 25.69 & 4.47 \\ PivQR & 7.15 & 8.41 & 7.78 & 0.62 \\ lm.fit & 11.68 & 13.74 & 21.56 & 16.79 \\ GESDD & 12.58 & 14.79 & 44.01 & 10.96 \\ SVD & 44.48 & 52.30 & 51.38 & 0.80 \\ GSL & 150.46 & 176.95 & 210.52 & 149.86 \\ \hline \end{tabular} \caption{\code{lmBenchmark} results on a desktop computer for the default size, $100,000\times 40$, full-rank model matrix running 20 repetitions for each method. Times (Elapsed, User, and Sys) are in seconds. The BLAS in use is a locally-rebuilt version of the OpenBLAS library included with Ubuntu 11.10. \label{tab:lmRes}} \end{table} Results obtained on a desktop computer, circa 2010, are shown in Table~\ref{tab:lmRes}. The processor used for these timings is a 4-core processor but almost all the methods are single-threaded and not affected by the number of cores. Only the \code{arma}, \code{lm.fit}, \code{GESDD} and \code{GSL} methods benefit from the multi-threaded BLAS implementation provided by OpenBLAS, and the relative speed increase is modest for this problem size and number of cores (at 7.76 seconds relative to 10.29 seconds for \code{arma}, 13.74 seconds relative to 16.91 seconds for \code{lm.fit}, and 176.95 seconds relative to 193.29 seconds for the \code{GSL}). Parallel computing approaches will always have to trade-off increased communication and overhead costs against the potential gains from running multiple execution threads. % Nonetheless, with the ongoing %shift to multi-core architectures and an ever increasing number of cores in %modern processing units, it is conceivable that \pkg{Eigen} may also switch %to a multi-threaded approach to further improve its performance. These results indicate that methods based on forming and decomposing $\bm X^\top\bm X$ (LDLt, LLt and SymmEig) are considerably faster than the others. The SymmEig method, using a rank-revealing decomposition, would be preferred, although the LDLt method could probably be modified to be rank-revealing. However, the dimensions of the problem will influence the comparative results. Because there are 100,000 rows in $\bm X$, methods that decompose the whole $\bm X$ matrix (all the methods except those named above) will be at a disadvantage. The pivoted QR method is 1.6 times faster than \proglang{R}'s \code{lm.fit} on this test and provides nearly the same information as \code{lm.fit}. Methods based on the singular value decomposition (SVD and GSL) are much slower but, as mentioned above, this is caused in part by $\bm X$ having many more rows than columns. The GSL method from the GNU Scientific Library uses an older algorithm for the SVD and is clearly out of contention. The \code{GESDD} method provides an interesting hybrid: It uses the \pkg{Eigen} classes, but then deploys the LAPACK routine \code{dgesdd} for the actual SVD calculation. The resulting time is much faster than using the SVD implementation of \pkg{Eigen} which is not a particularly fast SVD method. \section{Delayed evaluation} \label{sec:delayed} A form of delayed evaluation is used in \pkg{Eigen}. That is, many operators and methods do not evaluate the result but instead return an ``expression object'' that is evaluated when needed. 
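In code this means that a statement such as the following (a schematic fragment, not taken from the package, and assuming the declarations of Figure~\ref{fig:incl}) creates no intermediate matrix for the transpose; the whole expression is evaluated only when it is assigned to a concrete \code{MatrixXd}:
\begin{verbatim}
const Map<MatrixXd> A(as<Map<MatrixXd> >(AA));
// A.adjoint() * A is, at this point, only an expression object
const MatrixXd    AtA(A.adjoint() * A);   // evaluation happens here
\end{verbatim}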
As an example, even though one writes the $\bm X^\top\bm X$ evaluation as \code{.rankUpdate(X.adjoint())} the \code{X.adjoint()} part is not evaluated immediately. The \code{rankUpdate} method detects that it has been passed a matrix that is to be used in its transposed form and evaluates the update by taking inner products of columns of $\bm X$ instead of rows of $\bm X^\top$. As \pkg{Eigen} has developed some of these unevaluated expressions have been modified. In \pkg{Eigen 3.1}, which is incorporated in version 0.3.1 of \pkg{RcppEigen}, the \code{.adjoint()} method applied to a real dense matrix copies the contents of the original matrix, flags it as row-major and interchanges the number of rows and columns. This is indeed the adjoint of the original matrix but, at one time, the \code{wrap} method for the \pkg{Eigen} dense matrix classes would fail on row-major matrices. \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwb{const\ }\hlstd{MapMati}\hlstd{\ \ }\hlstd{}\hlkwd{A}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMati}\hlopt{$>$(}\hlstd{AA}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{}\hlkwd{wrap}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{transpose}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{badtransCpp}: Transpose producing a run-time error in early versions of \pkg{RcppEigen}. \label{badtrans}} \end{figure} In the code for the transpose of an integer matrix shown in Figure~\ref{trans} the transpose is assigned to a \code{MatrixXi} object before applying \code{wrap} to it. The assignment forces the evaluation as a column-major matrix. In early versions of the \pkg{RcppEigen} package if this step is skipped, as in Figure~\ref{badtrans}, the result would have been incorrect. \begin{CodeInput} R> Ai <- matrix(1:6, ncol = 2L) R> ftrans2 <- cxxfunction(signature(AA = "matrix"), badtransCpp, + "RcppEigen", incl) R> (At <- ftrans2(Ai)) \end{CodeInput} \begin{CodeOutput} [,1] [,2] [,3] [1,] 1 2 3 [2,] 4 5 6 \end{CodeOutput} Although the problem no longer persists for this particular example, the recommended practice is to first assign objects before wrapping them for return to \proglang{R}. \section{Sparse matrices} \label{sec:sparse} \begin{figure}[b!] 
\hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{MappedSparseMatrix}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{SparseMatrix}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwb{const\ }\hlstd{MappedSparseMatrix}\hlopt{$<$}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{$>$\ }\hlstd{}\hlkwd{A}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MappedSparseMatrix}\hlopt{$<$}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{$>$\ $>$(}\hlstd{AA}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MapVecd}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{y}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapVecd}\hlopt{$>$(}\hlstd{yy}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{SparseMatrix}\hlopt{$<$}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{$>$}\hlstd{\ \ \ \ \ \ }\hlopt{}\hlstd{}\hlkwd{At}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"At"}\hlstd{}\hlopt{{)}}\hlstd{\ \ }\hlopt{=\ }\hlstd{At}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"Aty"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{At\ }\hlopt{{*}\ }\hlstd{y}\hlopt{);}\hlstd{}\hspace*{\fill}\\
\mbox{} \normalfont \normalsize \hrule \caption{\code{sparseProdCpp}: Transpose and product with sparse matrices. \label{sparseProd}} \end{figure}
\pkg{Eigen} provides sparse matrix classes. An \proglang{R} object of class \code{dgCMatrix} (from the \pkg{Matrix} package by \citet{CRAN:Matrix}) can be mapped as in Figure~\ref{sparseProd}.
\begin{CodeInput}
R> sparse1 <- cxxfunction(signature(AA = "dgCMatrix", yy = "numeric"), + sparseProdCpp, "RcppEigen", incl) R> data("KNex", package = "Matrix") R> rr <- sparse1(KNex$mm, KNex$y) R> stopifnot(all.equal(rr$At, t(KNex$mm)), + all.equal(rr$Aty, as.vector(crossprod(KNex$mm, KNex$y))))
\end{CodeInput}
%
Sparse Cholesky decompositions are provided by the \code{SimplicialLLT} and \code{SimplicialLDLT} classes in the \pkg{RcppEigen} package for \proglang{R}. These are subclasses of the templated \code{SimplicialCholesky} class. A sample usage is shown in Figure~\ref{fig:spLS}.
%
\begin{figure}[t!]
\hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{MappedSparseMatrix}\hlopt{$<$}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{$>$}\hlstd{\ \ }\hlopt{}\hlstd{MSpMat}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{SparseMatrix}\hlopt{$<$}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{$>$}\hlstd{\ \ \ \ \ \ \ \ \ }\hlopt{}\hlstd{SpMat}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{SimplicialLDLT}\hlopt{$<$}\hlstd{SpMat}\hlopt{$>$}\hlstd{\ \ \ \ \ \ \ }\hlopt{}\hlstd{SpChol}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwb{const\ }\hlstd{SpMat}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlkwd{At}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MSpMat}\hlopt{$>$(}\hlstd{AA}\hlopt{).}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ }\hlstd{}\hlkwd{Aty}\hlstd{}\hlopt{(}\hlstd{At\ }\hlopt{{*}\ }\hlstd{as}\hlopt{$<$}\hlstd{MapVecd}\hlopt{$>$(}\hlstd{yy}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{SpChol}\hlstd{\ \ \ \ \ }\hlstd{}\hlkwd{Ch}\hlstd{}\hlopt{(}\hlstd{At\ }\hlopt{{*}\ }\hlstd{At}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwa{if\ }\hlstd{}\hlopt{(}\hlstd{Ch}\hlopt{.}\hlstd{}\hlkwd{info}\hlstd{}\hlopt{()\ !=\ }\hlstd{Eigen}\hlopt{::}\hlstd{Success}\hlopt{)\ }\hlstd{}\hlkwa{return\ }\hlstd{R\textunderscore NilValue}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{Named}\hlstd{}\hlopt{(}\hlstd{}\hlstr{"betahat"}\hlstd{}\hlopt{)}\hlstd{\ \ }\hlopt{=\ }\hlstd{Ch}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{Aty}\hlopt{),}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{Named}\hlstd{}\hlopt{(}\hlstd{}\hlstr{"perm"}\hlstd{}\hlopt{)}\hlstd{\ \ \ \ \ }\hlopt{=\ }\hlstd{Ch}\hlopt{.}\hlstd{}\hlkwd{permutationP}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{indices}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{sparseLSCpp}: Solving a sparse least squares problem. \label{fig:spLS}} \end{figure} % \begin{CodeInput} R> sparse2 <- cxxfunction(signature(AA = "dgCMatrix", yy = "numeric"), + sparseLSCpp, "RcppEigen", incl) R> str(rr <- sparse2(KNex$mm, KNex$y)) \end{CodeInput} \begin{CodeOutput} List of 2 $ betahat: num [1:712] 823 340 473 349 188 ... $ perm : int [1:712] 572 410 414 580 420 425 417 426 431 445 ... \end{CodeOutput} \begin{CodeInput} R> res <- as.vector(solve(Ch <- Cholesky(crossprod(KNex$mm)), + crossprod(KNex$mm, KNex$y))) R> stopifnot(all.equal(rr$betahat, res)) R> all(rr$perm == Ch@perm) \end{CodeInput} \begin{CodeOutput} [1] FALSE \end{CodeOutput} The fill-reducing permutations are different. \section{Summary} This paper introduced the \pkg{RcppEigen} package which provides high-level linear algebra computations as an extension to the \proglang{R} system. \pkg{RcppEigen} is based on the modern \proglang{C++} library \pkg{Eigen} which combines extended functionality with excellent performance, and utilizes \pkg{Rcpp} to interface \proglang{R} with \proglang{C++}. Several illustrations covered common matrix operations and several approaches to solving a least squares problem---including an extended discussion of rank-revealing approaches. Sparse matrix computations were also discussed, and a short example provided an empirical demonstration of the excellent run-time performance of the \pkg{RcppEigen} package. 
\pagebreak

\bibliography{\Sexpr{Rcpp:::bib()}}

\end{document}

%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End:
RcppEigen/inst/doc/code.R0000644000176200001440000001650012253717461014717 0ustar liggesuserslibrary(inline)
library(RcppEigen)

incl <- '
using Eigen::LLT;
using Eigen::Lower;
using Eigen::Map;
using Eigen::MatrixXd;
using Eigen::MatrixXi;
using Eigen::Upper;
using Eigen::VectorXd;
typedef Map<MatrixXd>  MapMatd;
typedef Map<MatrixXi>  MapMati;
typedef Map<VectorXd>  MapVecd;
inline MatrixXd AtA(const MatrixXd& A) {
    int n(A.cols());
    return MatrixXd(n,n).setZero().selfadjointView<Lower>()
                        .rankUpdate(A.adjoint());
}
inline MatrixXd AAt(const MatrixXd& A) {
    int n(A.cols());
    return MatrixXd(n,n).setZero().selfadjointView<Lower>()
                        .rankUpdate(A);
}
'

## section 3.1
(A <- matrix(1:6, ncol=2))
str(A)

transCpp <-'
using Eigen::Map;
using Eigen::MatrixXi;
                // Map the integer matrix AA from R
const Map<MatrixXi>  A(as<Map<MatrixXi> >(AA));
                // evaluate and return the transpose of A
const MatrixXi      At(A.transpose());
return wrap(At);
'
ftrans <- cxxfunction(signature(AA="matrix"), transCpp, plugin="RcppEigen")
(At <- ftrans(A))
stopifnot(all.equal(At, t(A)))

## section 3.2
prodCpp <- '
typedef Eigen::Map<Eigen::MatrixXi>  MapMati;
const MapMati B(as<MapMati>(BB));
const MapMati C(as<MapMati>(CC));
return List::create(Named("B %*% C")         = B * C,
                    Named("crossprod(B, C)") = B.adjoint() * C);
'
fprod <- cxxfunction(signature(BB = "matrix", CC = "matrix"), prodCpp, "RcppEigen")
B <- matrix(1:4, ncol=2)
C <- matrix(6:1, nrow=2)
str(fp <- fprod(B, C))
stopifnot(all.equal(fp[[1]], B %*% C), all.equal(fp[[2]], crossprod(B, C)))

## section 3.3
crossprodCpp <- '
using Eigen::Map;
using Eigen::MatrixXi;
using Eigen::Lower;
const Map<MatrixXi> A(as<Map<MatrixXi> >(AA));
const int           m(A.rows()), n(A.cols());
MatrixXi            AtA(MatrixXi(n, n).setZero().
                        selfadjointView<Lower>().rankUpdate(A.adjoint()));
MatrixXi            AAt(MatrixXi(m, m).setZero().
                        selfadjointView<Lower>().rankUpdate(A));
return List::create(Named("crossprod(A)")  = AtA,
                    Named("tcrossprod(A)") = AAt);
'
fcprd <- cxxfunction(signature(AA = "matrix"), crossprodCpp, "RcppEigen")
str(crp <- fcprd(A))
stopifnot(all.equal(crp[[1]], crossprod(A)), all.equal(crp[[2]], tcrossprod(A)))

## section 3.4
storage.mode(A) <- "double"
cholCpp <- '
const LLT<MatrixXd> llt(AtA(as<MapMatd>(AA)));
return List::create(Named("L") = MatrixXd(llt.matrixL()),
                    Named("R") = MatrixXd(llt.matrixU()));
'
fchol <- cxxfunction(signature(AA = "matrix"), cholCpp, "RcppEigen", incl)
(ll <- fchol(A))
stopifnot(all.equal(ll[[2]], chol(crossprod(A))))

# section 3.5
cholDetCpp <- '
const MatrixXd ata(AtA(as<MapMatd>(AA)));
const double   detL(MatrixXd(ata.llt().matrixL()).diagonal().prod());
const VectorXd Dvec(ata.ldlt().vectorD());
return List::create(Named("d1") = detL * detL,
                    Named("d2") = Dvec.prod(),
                    Named("ld") = Dvec.array().log().sum());
'
fdet <- cxxfunction(signature(AA = "matrix"), cholDetCpp, "RcppEigen", incl)
unlist(ll <- fdet(A))

## section 4.1
lltLSCpp <- '
const MapMatd  X(as<MapMatd>(XX));
const MapVecd  y(as<MapVecd>(yy));
const int      n(X.rows()), p(X.cols());
const LLT<MatrixXd> llt(AtA(X));
const VectorXd betahat(llt.solve(X.adjoint() * y));
const VectorXd fitted(X * betahat);
const VectorXd resid(y - fitted);
const int      df(n - p);
const double   ssq(resid.squaredNorm() / double(df));
const MatrixXd vcov(ssq * llt.solve(MatrixXd::Identity(p, p)));
return List::create(Named("coefficients")  = betahat,
                    Named("fitted.values") = fitted,
                    Named("residuals")     = resid,
                    Named("s")             = sqrt(ssq),
                    Named("df.residual")   = df,
                    Named("rank")          = p,
                    Named("vcov")          = vcov);
'
lltLS <- cxxfunction(signature(XX = "matrix", yy = "numeric"), lltLSCpp, "RcppEigen", incl)
data(trees, package="datasets")
str(lltFit <- with(trees, lltLS(cbind(1, log(Girth)), log(Volume))))
str(lmFit  <- with(trees, lm.fit(cbind(1, log(Girth)), log(Volume))))
for (nm in c("coefficients", "residuals", "fitted.values", "rank"))
    stopifnot(all.equal(lltFit[[nm]], unname(lmFit[[nm]])))
stopifnot(all.equal(unname(vcov(lm(log(Volume) ~ log(Girth), trees))),
                    lltFit$vcov))

## section 4.3
dd <- data.frame(f1 = gl(4, 6, labels = LETTERS[1:4]),
                 f2 = gl(3, 2, labels = letters[1:3]))[-(7:8), ]
xtabs(~ f2 + f1, dd)               # one missing cell
mm <- model.matrix(~ f1 * f2, dd)
kappa(mm)                          # large condition number, indicating rank deficiency
rcond(mm)                          # alternative evaluation, the reciprocal condition number
(c(rank=qr(mm)$rank, p=ncol(mm)))  # rank as computed in R's qr function
set.seed(1)
dd$y <- mm %*% seq_len(ncol(mm)) + rnorm(nrow(mm), sd = 0.1)
# lm detects the rank deficiency
fm1 <- lm(y ~ f1 * f2, dd)
writeLines(capture.output(print(summary(fm1), signif.stars=FALSE))[9:22])

## section 4.6
print(summary(fmPQR <- fastLm(y ~ f1 * f2, dd)), signif.stars=FALSE)
all.equal(coef(fm1), coef(fmPQR))
all.equal(unname(fitted(fm1)), fitted(fmPQR))
all.equal(unname(residuals(fm1)), residuals(fmPQR))
print(summary(fmSVD <- fastLm(y ~ f1 * f2, dd, method=4L)), signif.stars=FALSE)
all.equal(coef(fm1), coef(fmSVD))
all.equal(unname(fitted(fm1)), fitted(fmSVD))
all.equal(unname(residuals(fm1)), residuals(fmSVD))
fmVLV <- fastLm(y ~ f1 * f2, dd, method=5L)
all.equal(coef(fmSVD), coef(fmVLV))

## section 5
badtransCpp <- '
const MapMati A(as<MapMati>(AA));
return wrap(A.transpose());
'
Ai <- matrix(1:6, ncol=2L)
ftrans2 <- cxxfunction(signature(AA = "matrix"), badtransCpp, "RcppEigen", incl)
(At <- ftrans2(Ai))
all.equal(At, t(Ai))

## section 6
sparseProdCpp <- '
using Eigen::MappedSparseMatrix;
using Eigen::SparseMatrix;
const MappedSparseMatrix<double> A(as<MappedSparseMatrix<double> >(AA));
const MapVecd                    y(as<MapVecd>(yy));
const SparseMatrix<double>       At(A.adjoint());
return List::create(Named("At")  = At,
                    Named("Aty") = At * y);
'
sparse1 <- cxxfunction(signature(AA = "dgCMatrix", yy = "numeric"), sparseProdCpp, "RcppEigen", incl)
data(KNex, package="Matrix")
rr <- sparse1(KNex$mm, KNex$y)
stopifnot(all.equal(rr$At,  t(KNex$mm)),
          all.equal(rr$Aty, as.vector(crossprod(KNex$mm, KNex$y))))

sparseLSCpp <- '
typedef Eigen::MappedSparseMatrix<double> MSpMat;
typedef Eigen::SparseMatrix<double>       SpMat;
typedef Eigen::SimplicialLDLT<SpMat>      SpChol;
const SpMat    At(as<MSpMat>(AA).adjoint());
const VectorXd Aty(At * as<MapVecd>(yy));
const SpChol   Ch(At * At.adjoint());
if (Ch.info() != Eigen::Success) return R_NilValue;
return List::create(Named("betahat") = Ch.solve(Aty),
                    Named("perm")    = Ch.permutationP().indices());
'
sparse2 <- cxxfunction(signature(AA = "dgCMatrix", yy = "numeric"), sparseLSCpp, "RcppEigen", incl)
str(rr <- sparse2(KNex$mm, KNex$y))
res <- as.vector(solve(Ch <- Cholesky(crossprod(KNex$mm)),
                       crossprod(KNex$mm, KNex$y)))
stopifnot(all.equal(rr$betahat, res))
all(rr$perm == Ch@perm)    # fill-reducing permutations are different
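## A further check (a minimal sketch, not a package requirement): although the
## Eigen and CHOLMOD orderings disagree element-by-element, each one should be
## a fill-reducing permutation of the same column indices of KNex$mm.  This
## assumes the objects `rr`, `Ch`, and `KNex` created above, and that both
## permutation vectors are stored 0-based (as Eigen's indices() and the
## CHMfactor @perm slot are).
p <- ncol(KNex$mm)
stopifnot(length(rr$perm) == p, length(Ch@perm) == p,
          all(sort(rr$perm) == sort(Ch@perm)))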
RcppEigen/inst/doc/RcppEigen-Introduction.pdf0000644000176200001440000124752013236624367020715 0ustar liggesusers
^wI[:Ք7R̓{O%|H;sW׆vgrjwJeG$Y'P- .k݅*CS0>j283cQ~ϙOy\|v="GDb^&솈HjD[ '["ÏE\a)~n,@WpiZrJ(^t-Se( !m:X$롬9L^ٱDQexJ)Iu< mT)k7ǥeCF,D=`Q+^}#nu+ݎ ЧXPS +͖ޡi܅=k-jCNs|eDi:?"*Qn!/JH;*^>+F1t&L2\K]&kwTgoAQ^$\WKBo|oQZxU^Cٛ8J6\fώ!lTHX_4 0a@R H49^z@ JM.i8oHG~,-= H~88J o ;>S 7'_[=#l7{ؿhE6/9h9r0]`#fttk ,2*{8wz%iU<xXY'-`î2xǧXZnp&4yCa&X}1G; Li/ds@T1H7$rh5q3s]:QƂ|5l/-~.~X%q yp\*R2a8R 1x9:RqE# “!P.:vy2JQTV2%R,<Ȕ26=ySFkg K Dą#@$L1hg؅G AeR`>?~#w"4tMBۢ' A-~3Y;?|W7xJ,qyhpڶkX=K1 )0$sADBXR41uBs'68I}M&k< ihAvlOpV@=nKRˏ0Ȅjƀ+DK¯kendstream endobj 289 0 obj << /Type /ObjStm /Length 1894 /Filter /FlateDecode /N 90 /First 782 >> stream xXێ7}߯6$W!uYOIvyǍ趺I>8QS4>@*:ULRB #Ň6_+:aTcN2Ƥ(0)%oo^k) =Nx2z| F㒈8Di-HpDJ?EG%4S'#mD,$p$(`s&67?n1L`-/mفvKxvP <{-X&<^9(uM"xXO'dYAxN "6%A==Xoq"8[EJXxD ós0GH&É8x(X& 8&4ѱ}iT:ʈ!mD$U(L2Ie>1R DI@Ʉ|iMjk*<뫏%R|Ș^Q2;lґ | xǶKjsfo /Nih hJ \tP^*UԴlraIS]md^dJrqbd偄΀Q?&H>4Ɲ8mhuRI&*Zur"a]TcOapEt cJ*N[iS`(k~@!^(&m %,ʓEP{ttHjN%5F_%PܥSy4x$?^x-ݫXAÄ5FC -*];uGHmݵqj&|\nu_p }ڋv=-]drOfbf/uc/\}q]Tqy!ܞap}'oۏrvcfƞ-7vtv]?ng we'|ߴm{ gSMI7e;u{7w8;F}ke[=|\ߎ|wwݿ*h7ZF6: ϻ$XɽđR[eTw-nf}E{?ԕ[rDjPGٻj58iXڟ"Iclr1NȬ(qQĮ yzzc֗c9m(Ǩ͵b6V&ծ5'J0IC:&;Cm$#Hn'=*ccɍ٥gfLe44`h.e|cV§֐L9r,o3ۼ%Gcȳpܜ/V`Y+~+9T{^L[<ܯKWh- _?|*j|ٍ_PaX/Ƴ5endstream endobj 380 0 obj << /Type /ObjStm /Length 1604 /Filter /FlateDecode /N 87 /First 761 >> stream xo6WC<"P(u] ۊU_ =ٲM'Qs#O.ȅ. 2wxJiV( =O/H5^ Aka' :bF+]y" aU/YPMcAbU/[sqGPځM&ɨ9wqҎWU~:ows5gZ6a P2vc_=OCo7s=奘8vM j/Jrj=JR-ϒb1L狋:ڎyzWg. "e]=>V! 0mFqz+Ӭ|s/grnė˃R\G\}=5wdP7a;D2h_#lf=%ݗ{jŇC,` ? tKEz6ìajY+U6AF |nQ^q4n^W i]&Qqru9IaV5~>I3t&ϩ3~VNn)ڰۄ%:"1бĀtNܤ6T[~n6-mϒ߫;6`endstream endobj 468 0 obj << /Filter /FlateDecode /Length 3316 >> stream xڭێ۸=_avsEhH δX ֬Ivs#%L&)Q!ySd9I&zNtʤԓ1ZMr2uLO;Tlf:{܃@=o_k߁o3?n}$*X,|Y3['ԩ"xXXܙtz͟ot:\"Mr!a?%(DϏ}L\"];Vex/ȫdJ׃l] ֺhMFϘ_Kw~ՊP6 ':Bn[qhEkdْ!R_3X#X6x |-Mn-3x܂G}v}V Fۮԁ5 *gKf !Ӕ #ua[hAhyW׬vnj|lz-r;/xM܎PUtxiJ@2329pGtN *9QN|+JDTQ/I397odb'=CsczvM^9_N5X@Բ]#\x\G& OVsFf[7p"9=wI2;+ޒZ"37љ@ (N{4e9=ӄ,")MUwA}}\RNጪFDt-ԫ-%Da]y6l}Zn !⬦33Mp#KFk+oCTr]-ںCe!TbyyFj2]w+cI̍*lșS8,_ᧁ٬g~u;r8qX.=$hվ"xOOTC1p >$|>/خ ?T;e,>/PŴbxĖ$-qTYRvV a|9w*/ d#bde@|@Ew,&\P#OܷrQ}'C|f,'I ;\$z6Icm}6vqPKʷ8Tg v< gHqQݘf$J3miΊQd;t&[b ԹgUEW殶g-K{&J*+ݙ(B"h4z1fAm>hsA&DXA雀mġ+q YB=VQˏ]YG*k[yF)Cգ1;~>`RUifgfk>z{(~L1aM>hҏpDArm+$(9zQ$It-´ht&Ձ9|x;+tO(Ldf0wUNM>$tù$fFV܋( e9u| 6:^18!k1-<@%E\0 3W 'Nx?2Z 0{:`@9LD?Ǎ<مx Ig Ҝ9K z$J :80!+ЮQD>ʃA,dzEnu~c>!@|h<Y3bh`~nAJ*pݝHk_E=#RBCN'JS4بE|rBQOZRS\6ٳˤ"G;(븀Ifīo)i2D0ZV@jwP y.kvƖ~ycӼ~N" 9u;θDUM6s"NzU b1FFxj?ë]l^:˯iQm8hAC)#W//1d(d:_:q)z^nHPBw|#ĤdTp ԭe6wH2A3;6"M큻} * {*5\d>Քĝ1^<ˌwͨxBZؘn/8$B|O(RughwPn:ٵnx}leL\ z I > 4Kh, 0.ѭ|EvrIuz5|'B+(Hz)M(Xtv\Tzز?42b`zdm?'M<{lH6h{.?ܛXiVH 9LSp8 \A5:{nN-}\1 YV2|ƯP=u`1jT _-)[OD~'ҥ\]@XJq)>cN ) ^+n4ͬ*m]띱 <}!6:j!eP򝯭[LAQs(I}l}Y`K'Kjk \G3s;RT|^2O 45bJD꤉N 4^:p-xD ow^TY5ZVrHt%c[_-N۵~n6bۇPB1q4ɦK#a81˲5DN]&thk87c 8j7N*'UeN $~jn'د2Y:Wږ2g;nQmaC5_1Gy͒q8.}疰-̷ilFhI4f튄ppilH;\Gnѣ2ЉXp?qEи$?^n+6 Ze5(WW '7(i]2XR`<%AJPpt1eϔR=u}*Euqj ABKu:nn&K F _g?z!&%IRU_$A U' ITDZrF˨R:"b$sqK;1CAK "I!l{'=0ęi'/1ɣt2*/ĥT@|ɚLL+,B>Q}F5v0{\$+Cyƶ9n/}xIe2ڄ> ]!՟4F+tѽm`|7W@endstream endobj 469 0 obj << /Filter /FlateDecode /Length 3558 >> stream xڥZms_B3r!nR5Mb; *_}Ál' E^vvݣZj˓/^jЪlT׋j0oxd-Uyy\3kMNug[[tkh5!x6Y~oLg Ga9w֥LdG\!;5.É'J_\DãZJâ Y,'oVt~&,hva)M|x}Ü^uaYƖډrOLӪo+j>_vhX:4x3Rόq 1ҡ L,RjQMb]| k_^KxS 3V˛"řw="'$ORGDIMh[1^uEmaKㅚAji{CMSڪ$zpqo.H}Pq]־RmlۜnrȸȭΨ'P:{o#߁Hh!ȫQenq;^RI]VHaxw_](&!9׸~+;5}4g u.A7 j=c'y)ƽ.-ăJ[.uOY~}'=-Pbm‹30:k{?>i+WpiE{:zcث/%D|aUG,oK]hšOH Gud[jǽ1w]a/su/!gEsLըu/ܠDI3QM_DseieݤA2s1<~9DE{bcŠ3o>F܈n'5wjoTo*s1nwx0ꗔ]Hw#v1Ώ g%~Nyn 816V[(0O$z&IAjv(bbgBfkxM>Q;ۄ=xi3i1c2G3(9eaKܓȏxjn"*5R {sEJUsto/eyǴ2H"r=?^(-8jK 
4na葯T\UH6*YnC|?vh&BٱTEwx@nFNOQҳLi.h"^J%Od'Ɂji[AH ]C>;:C*}&W%q8Rx }GnaJಡEli#rVH/QjwuDb0@iݘf|h{ ș$W 'nhWdZBW)G4a& Vl߳ϠfZ$>ؼЌQ %*iZh!giBhS^gZk8,bd:␖6iWF7NR܉jNS&cFёo!O!L@n v;DOhHuǠb=ʼ>tS:BZw2 WlR x 9!L <+| aدb!lnk9P shEFVf>rLRH"hF )8$B#= sDghlyMMQW`V GuK)@[ 2y:m 6_dwoINW/y؅N ,Wf٧Ċz6cHZd/-X(3V"3販SC>nRJ܊.0'˲7#Eo펿7R iݚܰ.FJ_(T=JGKgWST7U)Krwg?;#DhH^1=8rƕ^g7sq=qm!)׺Wt%W%q#'GK٬֗h 2,a`/D\Wziq]'4#9~oQ%Zn$pJ C2 2B+t^AcI.xHCf!{ vL|Q:&tN ,]2qpP;EipZz oT <_3 }Ma5 Ð#wR&T5mO=?njTX":}NAgi8JIe#Kn:mbE#23fEH85f ّWsLUU0!! vw=,N/Rs9MMq,y>Lum)k\=||UlQؔlD=cu( :XäC B2х{TywL^H4v8hߣt.bX`/]E%E1'GEL}զ8_(Τ}YZs H6E2J5RAv10dZtI0 Y4)>lO Z R*xV5F4| Ug#BUZ;IQP##ʭȡ᯸wRIxX[ӻg" g 0i6H)P d DwYHZ,CLs ,݇ly7C$=&κ񶦿{k#@5<%m'6Y7H/&tzH?Q/]L7eֵj;tц@Nѯ\Bh\&ֵCj*r< z 7TA.TjF0je{OPRq Gf79T'ŘuG4yuA)ā⌜Jro{z%%hz,fъu_% Zu "I\td>n14؂: j1x8un,9rO@??yrM ^" , ğFqy(;4vԹ0`Э*bErU?ɍk nز+e~?~P-.խ93l^JH?].2~O, $d6u}:ȬN">d5#MpR#H^endstream endobj 470 0 obj << /Filter /FlateDecode /Length 2348 >> stream xZYs~ׯ@&x.\Vi7V*ݭVRLBm^KvӃK!<9dp'_,9> -CME̊$чFQD4yNNYcC3VʼVQ\A)@ux1H#C4EL!|4񮱋'`p͘^\#PTiӰ-`wq儊u'u,TlOr^~|=żH,EC~ Y[Ϛlthb ]VfX]1+>v0iQL-W 6ea\R[ßRJ8Y I ',5эgʻQkʙHS8b40Q15+ sشih?1<(gy+m Gjb.HǬjmET*=4#&hH]nII] *ܵ㤊Ř+X!Tv{1P%U`GWVaL.}LyE$kUL)QMP88^;);Fdp*X qg$%=~&g[<3WpUx 7uMߺ|Be3W[}³C/ƙ듛|_gM!N綀rZ'aD40a -(a| 1 yZ=i1= ;@oWI IR7Xj%1 7MyHLT||7=h_h,#}AL92V7n|UJcZEA&LquO\9i&aChs-7Z`R-fxZ?nH}U 1oRqpׯRٕ^*Pur[ὺlECny ׮kfOF#U#);k855u1(Mܪ^GI0dAZO,XN*Ҍ!M, .sg+Qt6\TVNULx(][s/NJaJ;[$™+Y,SfF9N+HBټOdBej'J mz&Hɶ˲/$al2Cg]\pmO]Y{;$,U]6띃 L`8vʄItmεAP V::6@*@ͤ@tF9{/,Śuc'_w-VVMl5R|'A{fRrUϢ,oЭ"s?ǣn}} 3ۮ~*7kXE"{vrVŔ&n\VdIVSaVׇ ^i-g OLihUS5l,6=H5c^wOC_gR36Sja>Z$iUG܍z_tv’nqѭŞqtj9X0l t=2;[o&z\vlv oO=ҕX{w,A>TEGOb6)$X3Y>(fF d (Ow>"ϡA= 7Qv&qkc{ŝ~NNmJz'Tvx)ˌ|vi C n ꟏ŗ_xYUp}$O/[=;0a1@xK ڶ> stream xk h7 &v1ZPpxw8+;3;x:^u~Hcvv3K%iD? a"ER$E,.gۑw?=y`<48ףގ'JQ 9<.NU#j ᚖ V >Ŗ8q3TYҡ8sDg~KpQtpG<& J(粈ˣh?EIB=D\vI/e\dLzM{wHK{)O[];M~ Z7}ȯ-#^^Γ2 8U7L=P3#z!a -Ds}ڡ:yR: Nw7ops WŵKK4h"&B/^UR -ڀP+o^7%{-#{m/LJJvj,C%{..2M.R5{ X-R"JUMlجتmI,V𑌿$&@hpKqS܉{HU'cȷU0f:s+)Gk0NeXm==Nv#>,V]Vf@t'B߹_ lKEI/$M) Wo2ԌgAӝ 'KeB$-N-@mvݞwW'Ļ /#ٱE2z&$Nd >DO_m@w8Ѫ]\ K. 
HKDDJ aSrwFz"K!L9TZš꾝~/D<:侱?$A |EUCw{t[-xz[]`G>=|5AA{,X杲oB]pÈwb/J޻ٵĖ-rDTVqoc4=4yF*3|ccU\Yd"6Fsav#٭0OZ@)|T_qdG{8E֐+tl9rMmO:)m@GrHpfPq2V 0Qi4Q:N!j*Uu9&% phFOd6Zc ͜ BaPL}ycL\УlPY ?[}`Ka I B Z+>7V}x4΄<NW&]d`ၗifIxDl9bV\PQ GIr"i0Cn샋4.3OHz*ZPZAk:DJ^!H߬Ӝϫse;vj<:wnl)& pEu*2%Kw6 J G䆮6(Ѹ~9_1і }C.:aCexMREaXd3vHqz} blqlwίʾ*xp%UNoa_TNd8벍Ή0ǻkpl!ނW^~5,MWE#n$d&ֶ4ĹuO)VY^ {>q=ъIaтA6g!m۠9kd,+ט->ܕCPoӳnG5Η81cOf}cTF`c;me_ݎ& U"F),|P[v|[Z)0aUmӂFM!Z4TqN4E |-|-IBwI^?$d~Jv(K#xwA#fn n Y@l_K>q>\ 88]Ѣ8oBvVvȷ P9rB1 IRօV& SM{>԰P 4χ 0 s >NÌ4rLPYW94xVe#e X#y.ZAGf0ԣ8lP4KuL!COҫp%Y)|Y6Ӕ-gH4>d Kq҉O A*F\O8r_p[p&sW"PeL4ż[_Vz(r',=J#Cfїm ^m\]g*h_BpRDz~cyo3CB-tNjgcp>mLỖKز,T-O"ģ\CH"r P; RĖ,\|01M:ީm-WX fZü1pe4Jvdn,Ц`όlms}T;(D+aMnzEt+dk~2ܗ;~~6s֌}JRVZ .,iJf '@ЖTm.exG <,e %%⼄xe)gl&y4"XFX`8q/1 o9LUendstream endobj 472 0 obj << /Filter /FlateDecode /Length 3119 >> stream xZ[s~PNL#ɲF\7Sw2DQL$R!)j|m H1ʹy,uYvʝ/^%KU91F+NQU_x~F7yo) xg[h<'~puzmY+*ybcE){vʺ`o|RƝ*NW8.e8Qsu2* Ebq5nqU,^7==IRϷ/ƔG t,UXȾw:µ;d`1Ҽ`q37kN(I}hip;嬆bq7>+  VÃUU^6/z2ߟܑu>Wya[n8ۜ-8%?h]x!21+uؿD).3|WtNқd< l2&UU+ŸP }U / }]T@#,A^C%8 Gm+\H=7y[pی}nq8q> ݈my&q.rgη~¾CiH`ߡ" .p}?/QU'Vu: x2ق?&^ Bu6E-~V ;r2MM1£:G2D^ WO*_?}PS;3@ra|LX(Glt @fCy]]{ ҂#dk`0$#v[dLdLQ-NpЃw*>jprC _㓖I߶«l |⨏-{9bt#T6.]( @.]O|IsǓFU=ь[GBOh[iX|죱y_mGlRw,%og >oWhZ LO=mmU'A{6f.֮[W.ډ[nE6ڱ7U?C#84^\;E˥ǫ >N25Y4  r!@)Fs;f04=2l=P9t ,NrU31Xe\qlS#;p׼VqR"(mT >Oژq\i4˨Vܧܸu* ?[GNfOBKcĐ ^G^sL ʇo@j{B;rȕtz}r:A󼻧2Z:XR07:We\@(a'봼9TV }bMK;Zh9i46w3on!)&`g#(z񳍛ϔ*]^+3YpQ<_\,[Zж!k!ߍ~_LnncX/ SH i E0T:fSV/25baF2@ (&Y/gcpçeou|3W(Wm&)>v-W[\ğ*&*Cέ0amyBZfK[ubGk{5D<9! 5 wI곻 m267JO{Q%LbY)ŒBT:>SD Pm!c 9aPԏH?%}#p|7:*]G)_q'G-UMH̪V533%V>9(+Ė %7dVu<,v7X/LC.rY7AK@ǸM_4sQ )^F::jЄ#rX<~|3p`^(|hoIZǍ$bȃ춷2b\F{b>*tY,D'"cETI0{-z|ZmM|Fn1bZIN0p~2e;+ bf;~{q}ۓ2O];.&>йacd> |A5_1q3JkL9[ePˆrW@]&g;3xLg N@,t X=~oN ,o3u]U~]̝2y6 ,0g*>e䫔ùtd\a=WM4s~YL~B3#1&5|$3{2^./)xćI]"UpLC;5ܧ3GuN0 XFmjl![8/I zClԞYnnmLu.KX(dw[BƆB"ǧ/Ѷ%,J)2R&;af+%xz)V+é%JQC#?l<:T-+~0CM>FYl |NHa"G/?% r?NFAoķv2c|AКf%N@˅xr8_4/;^? NqĄ!Mya[h}7Ğ O 5vsMiq+U:BH6*D20e/e _ ƤΝfM\.XH[ ?f<<!XFuǵ)WS+kgTeBKi˩Grӏ|3:H:n``PjCvt?`mendstream endobj 473 0 obj << /Filter /FlateDecode /Length 1233 >> stream xWKo7W,z)xi>a@m)\(!a]ˊ- 1WIjahc/`N{9]brA(Ilh9QCHPgfR!TWVsbܢU{,=L5L%[m`.+".YǽeF\ H}%M8DQ$VzWF"hJ-{>Oh/ E1xVaO,, (;iH]JPdXTD5iG\"ޮ:/`YIxƛvd VO2-}^FO98G<Hd KWxO! ^\3Z c$yeea.-[ vΔf[Y[`+@p *xg+^aEH}Kj nzt!dR,:R>R֝` u| q:yxƀowHҒ 3,x<e%i+C.ѦxMp{G57cXMi H&{*yf-™^0DK̅L8q0O|NSƯ$u5\ xM*fNjpB?N#elduPƊŢPt,XY-@%sΙS48EN/w *ܟy[DkYzopJ{BWgDjӵ! )va/v=yV!SJmڇ7pfv ^ds_;dW-b3 q\uAۈߢ1!JpP Fo$3vޜ)j_tȿtVMЬm)ұ;oC#L=5ouϔfhM)~5俊% /,{9z=VvN*i!)B15ڢcaF7,kfsd/>Љ޷{ Vlb11]/_vs'ީnLW͔!bpAj,]euw9"ǖM2Ã+xendstream endobj 474 0 obj << /Filter /FlateDecode /Length1 1567 /Length2 8652 /Length3 0 /Length 9681 >> stream xڍweT\.nn$8whnhI.;AGf̽x|U_ծV3irXC-APW .RO+ˏĤ;`1``(D_9Tu( yyC@w5@\^0q|X|bb"d@0P@N+ZP+0_)X%pgqn+7ft@ ;m wsXL=;.]  `+1 b *4A?j8mtEN :9!^`-h*q= 7 }@G2cuj;]]q rP''>y0 d^<ۀ!6[vsч]@MXlAp//yZ^Dc~>Pgc+ ? t07Ͽ`+8d ` ?>QgP??@M햕z|\B>~A 3iUXe  g;R_\GޔW[;&E7G?_ /Σ} R A,}pT@l'+YkVv*OsC@ZPW1|fx>q{] 0 q:A Cl0G+"Qm="?HcX xyS9e|Ld?탕 x9!M&VܽN:/3_‰^MqV=O{EBb0E6yK`V|NmlʐRm߫`%!ƶLJC6CbsWEZ[^3{t7Zs>jeFWe/al6~$6g.{+\᳗U Q_w.9-(9 +f|Wa퇵 w>Y>V Lg- :*&ix! ([q,Z1}hա;lщTD{?qrξZ/%ܻ0yˊ=%փ(b2 A[MzSVbpwe %NʷI ˼>8M,07Ӛp[D%nb}@qעӵB.]]qֻf_h#o SsHj ,7԰DU^ǏsțWϋc'?Lŗ 煿2#EGD}*iYWlfX1ۅ[iʫ‰G0;{nb;š`7@C+-y̴'HoyJ\i\}dNi2 l! 
]޲c^"O+yyT%TS2AM8M&Cń'2} oM!߫2q&&?[(::ihOW_153m"D6E~ͧwsG>"ӭjx#oq9rhͰPU*'C|m|^)R *x "XAe_VvH[|z77O2 N`E1S>r|jls2]tsV|4(]y.#m|O/qC]SYv!,»RSTrE{-ĠHKmFZ\GlITC4Qw[,'FMѻb8:c:UF|^3[Q\1z)].ݢM85DxctPbh !l4ci)v7Q:BnNdK#݁@P<5챀-;rjYO&]L䞏5"H\t /]ڦyOm &FٔJYOI[\!;e.b߃n"6HD8Ȅ?z_Tːԯ¼k^bg_6F6 <k*6ksR_ګDp8.(Co.㖷MY5Pn_!BbGC:>kU+eme4뵅^s}kUdWH *a)uy#GuzTw6"*e> nɤ*:@yN;Fizb5i}<yEK]uc tbyb8<Yc~pg.Nޭa)aX Z`wI >Kq}$[σOO~9,̰}4WMpI~J+*ޘ_qp+^זˠW9~"YC҃FK3z#}H]QN %v?T18ң4ߙV{^ZR(_fĜ5ɯqRQ4uJA:{{캉nA6U׀9zST?1g^a"=3Eϵa|*2Rb4gy'!eRpF#\:7B](OZYT~&C'p:*&Jc"+st>PJB؈|3z5i kY "*V͙3ћf*ҜW)J͜i鉟[XpK7xd7BS>1.8a) yþ:ܚ 8\^U/AnSИEȂfo&$f|J]1 U5ŋ*U,JiSeÅn  UY֗> `vi-iar< (9cexw,mG Yn;vnF@cۄ2y۸TɪЁfqLqD ~c6w_"YEzTiZwh=+]>7_ d**D$Gs:We*=G&͈G) /[zn|MvSnF#0z,3=@95hy¥?~>,\9ט-F}؃IH uҷ46o>;r&zvy glT: 1E6ASfܶq« M9l(CEc:,F09*;Qb=Z6,zOBH޴+Q-FZU\q,a8"-Wcz3 d=8mNi=KLN n{=Dhr$ҝ)Ft|]2s 7+'t0VLEl!GPB/ ҮΕŚ c9j}k̟/Koc!χw_=wX *g&v;A6+Ijres.ʄ][cxXEdܭ'΍Mf;n:$_|1Pq' !D۝lU#pL:̡|9b3H]j*:#vgmɚ9hZCZŗ;ͧk ]u)H 'x'rqL8S؍|Tli"^: x}|H!O~T@t3EBrfbJ撠U3nd2o"띁DdR]Evp<9n'tzȂßҢ7]q$]\WqGԀc!_> 9ݴ7l~Yĩ,&PI V Zxho<b !zu/୻aRtb'[CBޏYu潟|3LAZ݊UtII'X郫Qd_vRRb]Tk5V6V*tprO9xʤDV8BW]dlKվH#l+j)gðœ> ,o9mPŲw;% wK',]wռ*^,2FfA jONN`a؞kn~ Co.7QIZJ?t&9T "⽒R.ѾLk֍Ay-G=?6g:l/h,Xo"9L4Ԝh\\5"품zȽ ^UZQpOeGwI+¥i,ϓ ̫#_b/ 6;yN.Ro K6*p T Y8?,JƢ1/X9$Еi#DЃRnD;--Av y~>T|BSWOUλLUyLk RrO,o= y#_&^\dnڞ+麕H85kkψ7s|l5J; zuRru# jg%u(׏F;~v+4 O 'DѶug;>E,L)N>ٛns= 潴ܣؽW 8!BgL슝ev!^}url, 0>#p@`DopVtzQNU`13RE-?sU5 y|.ȺIk}LUoU4)hF0_}LFM-e"ѳHS/o˽1ץ j_:U[KdTJ9so:y6].[Un7kv1| C4N1Z艸< "YO]v뤪e2yS 1׶6ޞcHu C4Q.B)S%+Y:%lzKTde{(d*mdG*R|_$^/cF*%PњaG-_ VngWܨĶOOՂEt}|n⬌Ng^0!:m6)Q1ǵJ@)x"U:9mfDkKeP ;;jIOL)S ]_1#YZ5 ~Hz4JФl!ikŪ ۴$1w iZ&l#VLHe\,2|XENk5*/3}8ѻ)"$ʱZ"P YAkڦ ZtߏI,,*^牐Q곾4v}$k-2౾ix{ il~.fjLߝ4l`އ76go9#~%pXȼ>{AOvc $z'5J M!=[9 ?*r,3q҉9.(}x<k/^rl_tN!`MjG$ݚ}ݗ1?TD4s]gTsGjgl~iGZUb]Wm,}9"YcG K+om3yXŠyxxFP!CWԒ3iʒb}S[4eM#zDY.pgɊa\u)WwjHY!P[ȶ2 DЬ#Y" ɳɲзy4,+9Kh8FosJfHIpp28'}UQ7ɫWZtm%ҧfwi0 "eIc WyEKa4v"Jv7?TՍ-8y5zDך'm3Mm[(yjzM٭(xe~m"x%lvo;`sVuVmVN HA|Sci*omŒBBCM?l?ZCNIpg\E8>g@$!"[l~>n(xJA+3C " g-m§XWl_|*"{xKM4@t84L}'/6kH|( {U3':!Rxj$̷+mdCM@lw[$Sԭٮs B2[3%*8M!$s¬MWh\K7!E{=?*o*.XI7iןҧcB"Qtsr(JD!-t<76ɋcS/d'0y|~G [pBe1q,\k"ۚs1I+d}xY۷wBAA3a{, h=/䁄콶A)9戭fO՞!VZ iTŜ1mjo㗟MlP]Oޔ3;M<[d-tH3e) m{Lqχ{bANN8hOg+ʳohcQ'/ҟrW5Цn5Ƴ ~ʬ:m$leP buzc>cC/(W`Qమ<-aS &$B_JCXbo%]?iha@9rW> T-5#Q[ Eg pY|j"m b{WYךR}V5 N_> stream xڍPٶ-L 56NpNp@pw;>*ƴ5\s.(HD@I  #3/@LÅ@AreE:99M\l&.oa `{- `ef@/@ dP`Ȃb`O') 6p:L &.V@Llj`3JP[821393,i +*4!hbS#@ Y lnlAf@{W{sl<@hW_wdQdgdo J..{?Mlo&n& [ӷ?$ET&o C!eޚ,ao.ڻ8#O4{'ӟjcvdonsW& {+PF?o&l@333+zY1Q\_oMd|l8}o0L {ᷛwy_oe'eҔVҡS.QQ `afp}weX0+coEKC?OŠw-E >3/13oGO/dۼ;mwuU\WmD-mn"Y4WY5*5X0[=P A003/Vټ=o 4}b89x"]x9 01ڃ]Ro|`'?$/`$F\&ɿ7;Iog0ުޞPd/` D1-l& ۿBX `Wx |;_|ck/FF_1ooooTq:=@_߄:m_`y#/Y,oĝ2Ֆ.VNxwn_Mۿ [=ؽz*_hm:@3y_u]H]; ŮV: S2\ MuvІo^m egoI*m>OF Sm؃" {>ώ>6Pd)]1<WwU9ʧb5bKf)º0E@=;J,@{V䭻w?V܍GKu>6E-z*]Z:_D>m5mW6ɲI-k4r"Uªn3petZ챰N#X普q$~cMKnӄt9lG <'g'6nbcZFk%+Z7iZV HH=#\Odgb$,s8FBB^LY Sv&&$Uza@ 2ltT$ L\uC_&JsK5B1B;>ԁ;]M2<6N귺$cNE!m8)z\ i8AbJH w8Lbԗ2XQ嚬zֶ6x{TΜnDQP؍]cuvި^dP]ʵ'ꝥ9OR&IaP?=LNCwϒEmRfʇ=Ct%a!QXj|yHeq(r>)>%-lyʩxEe־:Pv-4O!MDڍ،Gn4d-~21Åp$Z{//$ɕ9 WN(N:nvon+e@򸖱"ܺ2Db\0j- HR,c@bKE N(;\u,U,x=a+*~1HHZ.pV; =P6ED̉ mN]+ȯb,Dޗð#Qx7~|*.|5:Dsgv4KUWI(E^AW{]U wkck jP*GHT5Ptc K|kwPιՅuG?^YLtsu\$!Uv԰E.=|shn5̗U(҆A> r\{A2jњ]uѻJUv|=9h3+iш)*v9WݽN͠^;nVRk 1I-v7ʴ$Ma_Qdo#.y0<--`o׷}=nHu,DrW*ny&ĩ̭x&艢rSB]@CYxl~n&X; g,SȑRhC4VM|@|QQ_p g>rb@T_⥫{.z«7(ɚd5 *6ߌUuL5W\h tcoFjͦɶ4 ]FO!ɦa4,t֘g Ut'(#?.0~Q!~-A};a,.'AZGb[ *Uz=8X=Q=j,,d]"ψj#8)r>̧Ll /SGE7{%//>Ȉn?PiAL!sc civzwR< 5g=f\0Kj% ~M#z7^DԤ7KEir1#HՂf_6HBp:%uO 6Xv_e" " %ض9-Uf q6EIbtȵ{|1/Y{Pvi`H\-d B|g^9r3g 
밈Xk/$Yj^o;4A2(EAn;ٮ$s0"w򘠪w)GG҉jdN?WDžkAߟLW";UpȱU:չjwSBg=EjO(H>*~3FIyK|R[ksJ 213e]soPh2Ȥ"k9&Qqqc% C|Ί,Ԗm_1fI1UIB;$'\YTbHuaDWNR2~T/~C Vt6ukCpk'<8fU!E*-Q(Sn=Z?>D=F\FґZB2ʤs]Q[_gQFLx1݆=^8$j ē͛#@>ցqtf老KaSi[$J"ɀJḁW M^vv >0I''rtqr+ yb 2]b xZi ng ĴA0^ǻ6!6nB^Fjk]I?,ֈu ԰όyQӖ !?8ޙ|/] 6 ۅ*:$%]M@_2#qK3>= 6: 8x_-NfB5K1UŌc:V'dT˯~M *1(?y6s=s"%5ǚ/ב3x" ~̗7^lI _LteHI"4:٥zSǷ>/VްCku@ a8V8 Hcի&ׂNa˥&Yh#1U1S~j5_LҜxf7g zdoc7@)GƏ -yޚARl9X.j}#V%qB盥)yPavGđSO[;%#t*K9];%N} Ժt"ѢH=m6V9=*DeuQL!pQ_9fF0T~p>;fFKp*zk *C.5R*-T!|$_W> ՑsQN;PŢӨj)C!rdlAGP<#;,/E.NY-xNj& _)y͕(uK9 Rn-'(c|efݭdz3kD}cW@&ԏ0[jAm˶>uW]-ݜLSx< ֐|pjb菼hJºp@0P V#-/^-5FK#A#6B~s8.1I`ED@>)?V>:Wp_D[E31"#j7F ,uyjX{ј.dIKɹkh{4DhyaMmoPV0UAl%xO\mJ! D1τ]WR0UJe-awEuX4Z.;Eu^q9~sEx{BT6ϸGNf$KD[Zc]TR۴5}@IM9NAm*@QTۦ8e,Q8"bS7nǩ0nWǹ%4wQ8Ve7 Nhֱ_ݫsUƞRS03; r?x{]+ P5g3ns]e) ׽]ɕVq,*'U`Bבup2ׯ`@LXaaJ)S/$EKX&}g̉km3H pbN5Wh'*; ?r񡰹4U\^st7Ѭ3|tT؉tg8@ؓ+Ma2dU/p2kt֠"̟ q|89̏$ltJuǘ;6P~Nбȶ$^&0W'CC:\IKCO>M/>5ݬ 줷MT2Sl$yƭT˥Bnպػ9&qvA=)sFMr:~(=MYb0w͢W^MZ`BvETzQʍAkW6\xvB}BUR̹15=~ԁR -ǭj0~%c$wc 1`j7S v9sZ'18P@è#:90d2B|<,]w|Sv_; &NV+XT,i ӡnۖ.#0!&wgPO ?KE*vz,mŹO&A :9&PW@v(i[}XҌ9}@ +iaej暇#N3}G/ZoT [V3X P@ݩt~dSn quĺ$}I^n"ݫ:(gW R*_nh!d3] Mr.`Kٌ9˾r^^ 5@:ӆ _ٕ?V_/GiȈמgX7wdtVΧOI:_G8A_{> /Bmp(+l~i]F8fʛ6[Wh5cF9_ltoGdYeNAuLbb->"MX({% ΃٠fs=pT́/KzL K G8`ZYLz"$P?Z?zMYxTrePG8AMuk~ 3~jrs~'82qڬ2L|%L([Ol W PXQvw7{̋-".\Bd]p βBc>f_62le*TCj7~7 gDx|U!}񄁅wok'َmGFU{emPFP:n!<}7NnH۹B.0=I%,4v NFe% ݇$F&~|7#A,ssө}=.̫9iFAVk[9N" |՝CP8x3 }Ij[ Lýp7֍!K R?_9KR;)6gp5GӉLpR:.;OANR{yk:0?؜5 KNk#8Vϥ4/PL, >r'L=*꽈8uLL -Ouչ1麦xe(sV%-ag|i8_6%Ibֹepqbˌ\^b 曈mVgLEku }RQ&8f;G8"D"͞iD2|'x]}°BZ`fE|CFT#DВHA.kU޶Q襸GB=s8ȀAJĴܦΟQHsus& na#7 \3e%/).vWw xsjp.lkVv6D*l,0ű({hC?>O.H'#'f1M7x I79 q_i!W[Li^l.Sm{SQNhHcܡoœm -GzzU Jrsx|}'(b Cü]vvx6E$dzT:cNP[2^gDc*XP9FvV1&f>ދ~LYw.e ɂNTK)G^z=4!C؜ᭊEӕ# Q8oz!֔.2 J-ulD>VĚ)2>U0WH$~xMP@5-j}DZ"FE`"||I*O 1$s}yY[2/ektvE]TpwXft̚\Z6Df\Jp+ f S!^hd7Rn%LB[Eޟg^64 d1-`1"%`>ް|XM\Nz@tFg>bbo8jNͻ/߉N.fnOBSg=TB`18yI^+nac{!ϔu#tm/84,O&r wBcQT3w1_5G Ul*搾xb }eqxU ńagZl Uҫդ+OwDn=_@>D,!3~ʭ54S/r96hI| GV\(,Kg4dq/y;|uᐭzEhP>M[_DMjJl4cfJ/eURCq8: Fȏ;yujU,sD!Vh?!Fa2e\t#%gE!̾lK5 Ap>cļ.`G(,4M\(OZ"0|fu%$FY/HǬBMvZ]/M>04j<8Ynr@򛂣ȖN;@q̾o%kcQ$@ǒ31wo_Ks~ܒ"ְ› nM$5''QU'F:i"GQ^Kxjd~HZ^0) 5A4?ch/0"r%nf"?woSK5^8()2/a+B˙N"8 SMXt΋^(nWPf1l $:Tv3L$B!S;j*R|ǣJ%A#3#'"D;slXcj4RGҍoeC*& i 14M/"N:9d'[(0~͵rjҷ8[*ǎv.,@T'O>s(f?u ט8^t5ٝ1~܉+Y@z5:Ud9'&tcH3OHzuPƋa4hTKpY, Yv8rZ/.VTnL( -qO>qQrcrD>C+"@Gii3Sj}MG{@iKjoX̏o֗}hyV 6m9X~T_>jUgͰJZ+w[A= mдL6nź05WJ\M ҵdQNP>f\{[́u3jɣl6i[\pUkjؾ#M &`r7e6MF@]>]D.}MF=wP`yUSႅ5y0QgŖhFC`uIʢ'$ ,.{> c<7S;l+_ Ә ZY$NDVhbk7wEdn7B t.%PPe~flNW8irzۄIs4if{+O)SѪ󨢻pv,܅qkA[œ8- Bszm آ1MOL₂ ]\_7w \-? \͎NԂF:>AQNU]=B3;_oM=)ӯi׉-?@H3k7+NY&XZ 3I''8q~~[G#PJjh Nq0x/k#OR _D:_c.B*QBg^v"7\xM)MvR(4up@W].y`G hK\;V iK mQ g~U6jv#5P`eN'ة}83mf*[_\-IB8kW޼:<$EgGAݭ Kϒ\"@uӄя{h~BŚ6~ISIom?C|XjRkŏv_*FGWTJq"C^/pwCh?ʅk< u;I_mTqCDQ;)zDeP5;XfDTr{v"+@T _LbHkP:5)-kg~aסfIGo¶]U1+ax-'_8'kĚ ((,#EhSVIDY:tZ!GziPqg7Fi9ڎ<#vý;NR<>M߄PjBU^x nUIQ Y,C%KmhѢ6'lHMr =/y/y׋Db39$hv&,EkQf>~F*uh%rw]n4*&Om%DwȏU4Ckg|Ixpk0q s\V8xO4*(Yz &xF)dvb6H\|`؟Y)RA,1^la1-ZTVMYn\4Ͷ 82ytCSkvZ|VY>Ě@g<(w _ٝ`$*8 {~C݅ .>elRYh8oMTGV]׏!="{qvdɂڈ$ 6h<\1[Z]vtǣ|vp $ @}F BБ{x/AJ`V7@978;[l~گS-+3 0\<܈E I!/W}TD"]2y݄ =1sq $;7בoH,h#]ʄwph |ҀETsMŕQ'H"btHKHPeLR)[pS{DqNLдֲZv8_Uzm,#}.?3% k*3_M养!] 
?_.wrw ͯlu-P%eeb;lvF Vq7CS+QhZ=X qYD?0*T 9z:r+M$XvⳔi8x3,,fd2KJ'=īMo\L(/WzٕF;8\|sɤ@` HѶ=2[Iz3^N(Rk؏R]^S|[] aqFFCpLGg {/EE>m-,\{l 3p(~r,k'Geb{}\LOX5(WmzVRl^!EkYBp:E/Q ī6ͣvFT^cĜ5gYx ()1-h_ (M0dt'%AH);1C%rY+Gy/Y MXܘ &Nч F": )nFOS'ȇKGnwά>DE:nLEÖI}1~rpcv(W+NyrWQZYDՐ,g1Umsg7$/z$Q;#0 ʑZ^.0&!((XW,j&/-j ؜eYݒZaΌfNrsIE<*̳U;n(+eQ+`;Ơn'y6hTÅ{Y?E%6͕nG DZi3UZk~1q7E[ j w~gy=p";mPonf@G:" Ll^ 0:z% 靠>*=N9CWqQ19~C*[v|+o5ْ2NL}R=|MX+y RIKmbb1ak_>Ƈ=r[5Uꙴ\E}]8 :`A& EZ8cQ]GbH]T&RVpҼM˹/s7u 4CUIidMs~ȗT6{.%)=@v3{hb +H@G+ٱ>K Wa{t#@6BpO 59 FPd!{Tk<~#'AѦ` /h#N)򌝛lZG,<&YsMRs:>hVnb"tfT֚2)}VhO+)m|3+ X{FXrf`$~w߰ A%^E8]09f\206k~%O"q'@0|.y7" Xh*m7ytY%'Ȣ9(/a/ypZUl>Y|a 0Cendstream endobj 476 0 obj << /Filter /FlateDecode /Length1 1574 /Length2 8568 /Length3 0 /Length 9604 >> stream xڍT\[.;]Ҹw 4H7xpw A4[\{Grܙ_u֑lWjj:* m6)kk< ebȨi 89y99t@PGb4:=+,>۩AewG_K@)/C0@d Pc(C@74:+̿>VL.!!?)'+ PWthC@@`B98<==-!LOtz [:crK zZGh+^9e 7.vw  l@@+yUv` mhy9Z~6%@^J`\Ynn %re9 O zn7_;x}6 "ݝ9t w&"lP''zYq -~ y.>|,=;Fh\\kh 'h~|W{\׿Le ;zrIȲUu//?x;%48Mo0=Lyf-pqZ=?ΰH ɻ;:Q3ԖN G I}5TЪANUZ>lBAn /jec4Gq>Zl\{/+홖TeVctuF{gzGk"8 D?  8 ApX 8 p8@@.N99%Pn?sIz%+wW[/dVh s+P)2O n^́ M9`s˜y31kܥOfo2b m^`}3+vwνed%1=4ۘHԙl鄈[UVl!3\+R/k-=QVY@QZ>rjϘb*uZgLj}ʭQL.!bC0De6B^/ckl1DlYcl?fD.ED/Hj`w<̶vA2 K40@FUhz*TCB~C M #>Mly&_R>؈~jIw.:h?|!Tȷ"p4SAΧvQ`DWgiċM-׶A/N]1+]-)D0Ra =tVF~.l^*~xP5y3`?xF7挌>rNfY,-HmL3#ȐQ9kmz(zmR_wф|[S fZ5rxx* QoD*|db$CX&VUspbڭrLbe >BT<$j7̫eoB>,q9/Sd}9I{_&9M3BcU[BicFHri4{aNͦ]Wsc>,[dXڪF|P/7~v"7P`^hqOpv n ;,;#?l\T-}+y&i \F|{6@ZMYD c@X=l; ł_Jv=Nm 4Y@ ޵trnRUʻ2]&8KMo!+HIy߉aA-aoFD Wm2ڡwmX߃=`Pqf`js^tu!_сSPBfݛ9fe\DNlQHNGoWn:P!p{4a7*! ̺֗ro46y*Y-_xFw5}{>MN"IJxwx~v# :*TD_~ߑf{YV~XS/$L;\ZYz&pp휈2 iH,Lտ nDaJyki r=@9hoDPڞC~9VۂRXi]Qd51JN vZ=txॸ ^7nT83M+5z,WS d+5Rw%_gľJ]nU;o-b-4~ }̔eTP\ON3t.TȰzX-L,>A,v'Zs9>סr<;R`^i5UV@  +[!6d c]Aa$>b((XB<Mtz G?˯=ں^z&`ch|9bGRlmzWG@TۄAV{uF#Ru[M+Iw8nRB \1OROͫZA$bJ; vH/|l((q)Dy{v-' onD"Q^dƼ㕅$)gGJ4v}k,5OiR-v7/`&-#ErC݈3ec9`lڗҚn"dXWf H\jR /(Zto Yh;xmEX"!j aefT*➉G%(Ёq^6'[r_$_̏)m^ӂbmu[l]s9o+ax wIς?"w찮7x+5#$gSlo٪l%NS&5cDwY;Z,F g()6bR`;0R9 nܮ=o(a:Ϡ}į+l~&:e.f"^_es/#W<ۄh,,M<,ߺrV.JY&oY 'Kho?7o\vj3ϾH>WF.FA) WQՇd$7eھ8Mk^:kķ6EU Vn7LZc\<\'i_nDu[3ej{6AݙX/}G3')6*{T{>6(F%<6`y;>eeVG+S;{+YDqeW- J*5%gT;TD QӁܮ:kU59u=l)&'J|Q"i<[>'ao~zě?W=MM̒WUx]Ld^"=0 C I-:R]qJwE7X W ).yY~+;adh1A,"V^sbškdY/"WuZޅ=zca&m.BW%fP_{鯷ֽUh fS{9 Dr1; y~'b7(=U"%E'ʤ^ffS#6 ? 
AۚVWKi|X1%?gW`NiރKuaӐ矯\g2iju#;$ kZoe4)q^nsVM{ރs {zb+h$w UGM\ iA:K^B蘼\0MD0f᫰Iyͭ~A!mi !ߪZDmAyM>ijAE (`m#v7cـ{0[х0b8AM+bb' V zl)P":7Ͷq}ҍ` Ml[%Ȫ_NLYL xzSɣW-,So^nL ,Yr^"Őԧ4.@Dl_A,\yo#Dca‘Ir5JYˍ_˝zjvk՛s9vLO2 ' _;ֹ{;EN͐I_/4 4Jfp&rU +" ^oQo bU{@ Ikぢcwsbq0bkI@l;=|r'~RA?ECʠ(eՍZGCtirBʡZpCҫ9[J6޹z"h|%R0?fV 4laFDc[.sdRM`~UoS!1=c}4eM]`e+Gз7AkH( =yd7s[ O`w_2YX7d&p"c"Gn-!$V'< ۧr.}T6ёaM}cEppX/ij̿HooH*mŢ^DAFڍ+Th_diuڕ?ҺఠTrI#QB.\4h{(R҃H#|h\zc\OO!79.Zr71NXg$bKamaXsHK#\I{3> >&g,IJEn2 sPvΨ`sF}tݏ  hokdjuL$y<%Bb =$GcgW8E,W5osVvhlT54sldTlD7Ovr/䱓#~dJLUɭɱd%-x;`Dupn/\phnώB8jߌ50ZKdCڲnf2̀)}RulE +=BKZqIc`A[).0:Fء}0m̕H<:"qt[ Uj:ZVW׾99zAqH'dž~l}(&`72~b"[҆y6Z5T̅熩"Jg eFhmlz>vM9 ƺXٟ,H[*&=aov{2Vߡ$SH v1ܙCtJ F-Lvrg_6E^9tB}PAxmOfGCCѲ }lCRMk~jތ:Pтmjc9(6"sS  qf)a(+yIt)9&-JL}i7ܝu7';Zƻ?94;-( Z8F_C4嶽_{qf$F8KAܫ+d97yeyNGu"M 槺Vu߾_s[0R M0Xd ~' te-j9zdu>!s 6бe S5HzWS >x5.8cޅV |˴O$޲d21"JxOY|w!ɴ|ݫ]x<ۄ)2O,e7Xh>to?^r]Cs.xVL$8gBE0+H5C8Z_谀֌$_&sEVI]zZG Zo9hޕ+w[~ǤeBHC,ZtZ%بw`eFیOdRwJgJ _rKPNO OV?'xRc~$ԟ%ဩk%5҆i ߃^Pgq>^۷R{xY %}]]0Ӌ@- Ks"7Ubs󂒚G߇,ặp#3jK"#$30@/S(oZi*%768g̢@s1 5?Uj!tϸ/-^ jhiXpB]zS {MSc]PfAg|7|y̏ .ZKjVlɯnx~J$P+N8ާzf epLL +|zhYf=Rq|JפTNvZ ,}½2uIqGNx7Sb1grw<'8yRҔp\Y EK"]rѳεr7L_d#ZV~߽]&uQZ.mtIbQq+V媮_bIB(뼧Sy' #%Jq }eu2:ځiBw`{1\ԩܗjKB>ẘbiF/eDMd)4NF8Chi@8] !>3ywlr7E[%p8R\)5zիN"Jp{vM$k k21dzR?!t1Kb [6W/bdF̋/xrcw&֚=f|^;^6GުfUחK/Ԫa:&s1}QzfI{gc_}oAR`JX?J3aNgݍ)oۯ}5pIk~p*2>ȶ8]rSO+NK1`G7XgDh…$BFC3S~ *豆TqP Iwk:Ǟ|Bz*%?< ٥h\oѯ?~\I #v^c)3bL9ifWlf!qp"]mE.B@ ~)֠uy$4t"ls.(S38܌ <]{n20O!G_"wWM\ k gD_ ;?ius bMlZaŲ0}=%!4L8JT>$>YZGZ(uvvU ߨb`dUUhMi7@meSO I=CK V`-fRH%2b,*ԓBTx*2UwXyS 솵"Ҝͤ,a`endstream endobj 477 0 obj << /Filter /FlateDecode /Length1 1573 /Length2 8073 /Length3 0 /Length 9115 >> stream xڍTl6L , J,Hw7] , ²tI" J7HwKt4ȇ}gk暹gf㔱[0$'/PNC__`dԇ"`+A!l O9yBbb<<@>1<j a`W #_ +WTTw8P ڀ`@ tw ^Et9r'@( v#_ 5AN?qP?v= P0. f Fꩨ?d?_r_ 3AvPG0PKQ ` GW]<uY~W*Aw ՞ tr:jW)+lNN`>y(ls7v/?7{vPݯ&lݜ `P7_; .@=^|@&P;"n`_;xyP$ d3G@=f&oJ#"`2ݱJ;밡"{0Trx6Gaj'/@uZkzVƽtPlgt`Ȣ㈹lфy mdcd2oŊhC DsYG>s:kXŢT_{ t%;)m,{bIz9PeSUMz <.n8OQH7[C d\]J6q9u&Ym7sNgqٟ/P *D_E}%/ꤊ aīlKYz*'Ys9螒fIfSm(ygQs~8LL'l6YXWlP'SVosMzR)^QOOq@jy(~9sL1Kjg ~ke%L~6Erg7v=va-D! |(^! Sc\ (SKq]i>-VI|ey\x*j\IIU j(^Ewf'k0ܶi-h"8RŨ26PO(O>o?J-YyPM<P7D4Zg-~q, ͙?2o񴒪. 
b#h_vד Xbn $6IwDi9pR ϧtT8m0O\=."q;n4H=W+"4L攋T$$ (nFf-~bv˧Ӏ(]T[hWʪ!- Y祙y0ENq#&HU8Thۿt%eZ)nH6%"TGr|OC&k吁$Ig#[VH g_nwNx{{R~c* ێ>@I,-ܯсtHqɭ;_vw?FaA|草l̷*qFǔ2y1KxZ9bʜF[gM?+JM^Oat*2\iJCL;LwsrSK'CRmb&_z5T+zb)tΨk*r ugW_Nu>QUMt)־oZvhZL).ktmޜk RQ4d|vEa 2SkEމ`HlAEe|Fq!j71փ Dչb6l3ROFCZ,\bڠ~V-e<[ЛeSMϤFr_!-Cp({ C[cnP/ViONϤhy9ܮ\n0i>:+#S֡}I$ NtPicFBAW*93=љH)@CTL?#hK4*G%ta #<9|C8/d**Zէ?$0 Eqd`'Yuo2yHYimpB2' hb OQ6!d C?@;Тxɯ{M9_o:b4>`/OΪ2P)sd(zTW7,݀%}N*jx Ws\&)u8LJay7%Za%ljV 5Èqͬ~}Za[QS4hV%?ܭÜ=ƙ 7IOdb`j^SM5͹ Fg ]Y"G崖fI&\ r L !LOX`{lz m +vLPgs6B}4ܦrLǺ3Z8xc@ MIc;/hӴtov.}*Sj8^3Gi&H`}ٗY[ӪIM֎#p:aR|j^ʃЩljh Nc` n I#4Ao<<Х+;A'WQ)1N0euPYb&JRY<(0;e;3;ذbJid=wVf+ʱꂶCMm mʆzBw(rf>>$IuvA9FeuT@'2^(p}hˀ_v&NN׾f0ljI, S5'iN?;׶ߐ?ZӍMl )o;|,xe:WO68cp 7}Taz j^ %8CG֎cDwMГrBqbLA)a 4ٞR>4o iOFI#ǤzqpJ>ՇHh €WQ4@>2+[V-R|b{UI]0~j1G_hǐ\zƼ"ͦ}gt/_WM/##{6ŲbfB?5mκ(&&kJ$wǥh倩7._sIo!ZVZy6SxONGkSZ DM%x7dcOI&oĚD$F; %84>+9.9óL>;1dm b rB 2p2JOX[ m%G2-t 3Qs5#v&&A1CB{ 7ʠB㽵P=ijj#r 7MoeCFteFhg`BXZ7J GNitD\%4兒!b5 }cDbFZ}s@& {6?h nΘ"Qg8dRv8s Yőщյ 6ot[Z|\)j=P4~\j> eျ-[r5Ŧמ0^i+'id7{[{&ZOԍdp u 1<{?a fd_図to$,%߶fQ93G1S+V|\ ':|fm?$6.\ !s:׍y&.[ѳ9(q$dC UNvosr3αM2IPmd+Kf#vFPkW$D>P=mq#ӏz@:e JI7 '6ooQMnT1E31f\fĪ#}̢ /SoX <"x/}_ଈQIb-']'ܾ;O?,ALF $T#͔y[<MV9SC B{J9P 4QevUŏoogFA;-u8n #eC5.SxmR!՜g#W:5MOgo-PK-kh+ 7?B*LH7exד!TA+%jΗ[% {;4xPh}U8R㘖x/}kZ?[C "s/ owV]&9MJHEnz.It*w w8Cg5za ,Qv{Rp97`al-WVAҭR3^J+_*} b3fNI2K@/[~S;Cl멚*́K<K%5/Ck, ӷxyR͞U_ h._xȈoZԓ/t>WQV{SQΤ9H~zBVCl{g&ݷX_ jNp>k-Z#]ޣxS,Ũv؎6)#2(+^> ro>zX4l>ZBIImW;\p?HUgA0m,+<(p(V!2b*@>eJ@n&ɒv8,!Blm㞼 9ĩR Eַ@GTu+DX6 gF$hiZeI7q CAŗ`j8~Z}k]O^ ^Yi,ycH(㝂B^d(ա>"o"w0#'R1Kyw-GՒ+2k MG]f=dUzxP\tAʼnߵ߈KO)qz TUk}sj4wj]RmA1ވn"J8e y|*6 S'Sc(ڷFQ<"L29(԰U?kWA]R߬qD>fwа\ji88'gA b;MdVΫAMxؐV=p۵Уᆴ|ƖL1ؼdY^݉ͮ->=yf"AuR]e1 vF`I+^V@)B &DkI Dhθ]'mzVZK-1L#ȉ.KkLF}ʅA'›^˹Y&UIiqYrCN<…FGy/>ƛ,\<:zef%g:`_^n[URM|I=nd2_o[HH4^i_^'U #bۼ=7XO6v|@lkl:sj7밖]jmw$<&CKurݜ0sJԼdk-|o*dWUX/mdƺqeւׅHNQ7#lqtt7c3Ik_\e AP=|mTRdsOxqĽBd0ݦQX?jB2UʎHȡ0OWٍuެ!ڔ'}FCRyXu :η:ƍ2jhTreE|.G#Rqwa_Wf~yӠq#d]8 L&dh<&l>&9/fE5[]: VPKZjP|;iDv&l#j Jڴѭ(T [${Dc c2ЈLGoXg@*y )aEL-L"hg[$),endstream endobj 478 0 obj << /Filter /FlateDecode /Length 2575 >> stream xYs]'r7Zm3әVCg?$D1& ~.B-bw_`,47/ TiΒ窴>Y$o&IsOg֚=|3{~~1^~u@ZOJ6MU|GT 7\hN ֨”|sm,`$U,$!:?) ۍS~JK kYH# ( P5r "$  0@mhfݒ7zjp 3`ppȾ_E>ydѝaΥ]e&K0h3<>8Ȉ-7;CZqy/o 5~q}u͌hĬ̕΋>astkk'6ƞm7/Lsy>S  r^6_ $T,|6.hZ?>=+ }e_"I7*}=+c<̊3u9Oj^Odg[Uv͢"HT|m]0MJ"-I<-/jgI:G!XWu/Sb2mxS+Pal:NRog3{X=ePrN/t]TL$s#kC;ѯBUư4tomL>i-*NSU(a[~IIS P)zE@{a/eca=Gl璟ጇG,S.T2.q2*5Oof`c!Pffp/LhS24 ZodշҤg;Z.0Fv= K";lxUj:sj%zzhd[)S-b\v2=bDO1zGC{{z ʆȠz:ۏ51B]ÆEpS|5dñ! Ke[N;?"Zb*F9fm՞$Va.ɒþ$;C)<-<vz[ߓjћMH^ns7 ˞ӰF>lc,wWgAWx2+uQ-~"FD8 FVD,xOv V2#7"$T}oTmG -`rm!ټ1ϸǥF<P-gg~c@BҲ-%?9H9 ̛-3)<΅Za%/-jyQ:9e*)bkJr5ds:Ts(^gNeTejn:=4 ey~.f z@Iw&j;0S`^kqF0M$.[}u}luᔳQ0!!0٨-IbxYn =,kˠ"G( mvi Eb`0uvmܲ}~r: ƫXG7w}IoYmrζUʨQ`)€! :P&DY:GHEY.F"*wTA[  9Hȹ*N +|f#xВiYEkuŤV2(R}GںŽP wM6XѰ|rNI]WӅ¤MFnӃm1/iphoy*ؾr߄X$JJTpD+w)MC$k4P3r`_ɝ*uVȰsʦ[JeǠbLnީ!r\uһ]t'۾PY[Nztc>ߊA, fk`zg퐯bnȶM`< ]t>$8Vi]LaȊa'Bw]q/vu)J-= =s^WZ@6H,WEk1]K~ejs<R6:k03? 
ůy L'(KN}ju쀝eH$ފa {+=lvb`)]wsk@^2|"HFQAFGU2BlrX| O(vT8m{(l{&v!C<5=lm5NxWd%,_g<ɦ_٠y5-üakEoĉZ !zތ.RȵcZ~> '>S@=mg WӐ=nۓ$8+1gF=SbpVB'$DGڄWDF´"7 !z g~&/c0 ~>a"2 u0풧Bw_ZKendstream endobj 479 0 obj << /Filter /FlateDecode /Length 2901 >> stream xZ`$X4l uR{Q Z:6:"|5áDiGJp79{^ŹJz*P^zYEIz~:x܆,J) r r1P1ޛp;@v+5*g>-*)W<+=Oػ췁늉SJiEqyoF~^X81(Eo4?y1^"PyZ?=ILmIo|,3oQ qt$x;.{?L44a;^w I;ŹnlQ:|sE6Oc(['Qf=cCI'ɵvR)x]-8P$ ʹ$H!o`IIs#/?I܁%j(IꡌݥI6`tݎ9; V8Vb8lcp*Bphf0E(čvӍn #qU kYpNr; Q p-232deۯ| 6SIOLin_uE;oDv!iRtt-ӵ456=h'CG"]C80y Mw$b==XCP6,縷"sU׶l)/* bQwQx&ȼ?s~ZuFlG$EVfD}a" A7K`9fy& 'Z `Kl%&H3xr KUR?闋5҂I$mUpeE#qy&rcφa^Q0ZDZH~êTQżr '^QusUH3տ\ΤLDvP j"`2y^rWT [_a) dGf Y' m;L*ysk/Γ IY1@,TL~:8~bhC$Ҋ,]?`0z~W@&JyW8C0h*)ZVlZAo"\4BÎB55\8 URDy]DHh'Osu% H`VyQZqW\8rR6kLrNؘlJj3\4v !/f I ,Lb AM@N x[bI*)T4]GQjݿ;zj8O<T!b"[N%.IkSo:8h8yFNEIG=v_8D,crkn@#C_#DquM$V ر !(kZR"}[FNU-xPaMG~ T]E|@*y%ފˊ5T]uɘCi<ճ׭5]!lYɵE312XOH٦ T`T=12HS4%> stream xko~aǽ4ZA@m(Ù(|<2;3;w?JhqaOJK"Zl,牧R!%qatgE{zj䎥0&u@fed[|62HoR}/ySGD Fv|Y+ LXh4LȬ+{9? ΨdSi6'Dq0DBu8 u#@ۗ 2;2J-ǘ33'`w 3O"p]bnв ?NKhq$Y|ٓ i5i_8`v3+jnd LnUN0X fyn#0Dq+-Z3v|4T^;qTiVcg]Kdh'wp/oNN=I~K$ ,Kzf9 ]g7yըtu-,OWo%WM g:^ 7w! K3@ɏX'zʡLJx,+-*\i "U/M,\ZW 2䛬[-.΂E\8tb m\\I*AaI#;'V˩F6"ƁDlF[u+ĨzqǙ ua/ 7h Lw)غ)(Fs2j6ScFFpZG t-L{5&>7.e؜S-׸\S`E0@ _- U>s-xJ2pZpjJ"&Ss@y6Ϊ05H ~sBDpt3K=w s&u[vg>SϖɚJ$)V6h.!px˾u,] {M?YOosT8_J;$?hE:E©>O.܋;s- t⨕Bcj4BKUJ6jl'+gyL#ϩǮJ$U(R$hKn6VM^PP PO *Z;JXcS mݍ˂Q`V1r<|$UЕ7c,ut]s`АsXB-%L;~LU4I~Zo]%xo0 {)I-I]dj:KϬ3 15A32ܘ$8"3uv`SYUYg%q5eUQF^!\)no%!񷕁 ;B$@A]XTAHŌU ,4VT d,$Y)Ty?.x@xi$ȿⲙez& Y7è jVSa$N4}'bS[wm}3Lendstream endobj 481 0 obj << /Filter /FlateDecode /Length 3175 >> stream x[Ys~ׯ@!֚c`JR6rT}A$<,۩t  (R~ 1=`M$3F=,J%d*PM4cjk/ !}g >$U;vf_w`J*vʺLcl{Ӄ`xx^e yt||&:k'?|-͔N}Oww++/"\Ƕ n9/0f Ø}500e}%ۍr݌ >qj?`%LhP#U:t!+kbK s6w'+C@NmPgf:cAxo3dǨ݋gs4{/ZZ݋L7*Y-/?vC@c G4H#9;b19&ҢQXk#<3 0-^4K5`[ Ů~"+Xfݨp?[Y%}Io#y(D\1{͐9^3eyv&pRn׶FcX}k/ˑȭAۓ5Q4;)Mxo{o̽+<rS!9˵/{+ h[v̑?Ȱqy<7ޱºs@.:Rb?7b!JOZ51iҊvmf.jnaO$:;K[6 R͉iN.N'!F0ޏ})d 5-xYK,m E'Z s)' %ӣ)8!=&WP'U%k{i|Ӛw Y#l S0tQv^,t%Mofp·?Șǡ1 $!E(dXEw.'0_.X:7OGfΊxEeN0 G6!zy/2d8 ƃܮ|_W2xG@zm[}vzzjŒ6 ME%7$m25uHs NOFdDvsM-Kr%B|f/|#7sw Wjx΂k'lTwU5zъw']$ewC$A 9&9$v>,B3ʙZҥ7 \9X t+Rr~Ms9\L&Nd=AEhy-i1P9 :wA $'[pɛQ tU^Wy.%RNUWV]`4Uy [5%8;o[E4LvcَL$pOm$VQ,Nv28gݘwr~3)/[\o[[]lνDv pӒA^_ +djGl^H'Ǵq~OⰩ/A5Y2rZEX;RnƙzT~׾n԰^b|=4WZ 3Pvbvx|'iP@ X7h2)K9Lp&){4IpF'uebAM`XB}4]*5T+5.uJ[5!W$dIL]e*)ƪf%{-EڂQB'S: >;; c*\T+|N=/dk)*38YK߸7fΝKpo, @l:vkGzJB7ƭ;xDd*~ɥ:?7G\GCہBpUn e`ׇP{L;Kf>P R"5a^9E}M@-roS$ChuMS?ŀJǧduɵDС\H"Kox>V;ѠZ^?DdPT*5B^*-8%&]' Gǎ E@Bԁ\!!U`[|ՓҖ,Kt[ K K;CcOo5e\5m`ӊ ;߬j{`!*mV[@Cn2YJ#ث2Hq&9&ϣ$3T?Zmk[0!8 u]y(_zf_}OL\m^n C+[vq UV9W/Va;ul9endstream endobj 482 0 obj << /Filter /FlateDecode /Length 2680 >> stream xn_A}p¹npI]Y#Ѳ]=rH,9.R̙sddߏ^x[Df*]$JId49h}٢ތSufn'ͯٽ3^niඥup-޽x+m2QF۸9Gߜv$1KdRʤJJ&ˣY2wI&tU&_h21/_y,q;ZpJ FYᐞ Zx$L1::۳닷G[3,~?`#T9\_@ 0򏇲r/pUrkfG8fhച<\*̃$4=ws|r<Ko[|o-dHLt% U&*GYE}zT\fZ?e{=q!/x=F]'%@ ?~ 0qlau7noA>c4qjz$9\(;H=} 0lgݒy@+Hg].jn`SC+ݦӗm=Ή 9BAʶcpe.J3?:5+d9]9ٝ+«sF _z~xɟ+U ^"'rwTƕ\ϕ;h;;. 
؞:i|9kJ_فl"RT*`b.-,Pˬd5A#R]XEaFƍM1izu Bg̈́Nje`8F1sC L\wٸ[_^A94޺b׹"vA`HV>wT=~FB4n^ȺN'8RHH4(Okw)-U7C #6hЍf%*fSðMlUlRH#Lbp1+ ?+~ƲˋCrYC5 :BʏڶNa!7, *ˁ= 7`gh*6fRvjW DumA.UUЅ-EܣVaCw7`)`i -_M )DL;SL]6) 6.{2oLZ`i!@^@0xޫ(RXiz p*05{i碅p6qI?VfMt^+1qTC̎-n(R 1jNϓz oPS${?Rf7_<7p4_]7J^/IpCfọm|Ds-џ(HQC=b 0{T@zM`d*ӫ@%w5tjC$̋8$}Q A;/w0⟂v,bB.Q>[a/B [>oǥAf˘@~|sU4NJO1>rR8,eU43Q4tE5k^FˁVBAx4"gx>—K/&6J;t@?HP{٭])3$zQ((V\ hJv8Ɠx^!;=YB?ARLFAz;VvSk|@YQHlX~F~5k 7ns*A\ 3^ʊ#v4ݸMOʶf-L'8VRb^m01H 6]^Euf9m.R\~%޶<-L_77Y&0iB<@2.P>"PvBW/5u\.ǃsVq;A qmϟIڼwNq&uJ?8ixӉHwfqm71 H:CGO}> stream xYoFݿAڍ&l[ &-lM"@Fe:\Q5䐢%Ӡ98Gq^=z]cUą^G2º,z3ݫ=˓*<8OFeO5r=֣>Sx\_5Qdɋ 𱀿ik]GeOpGGq.9uS{G\%kx Wgix#MF妈7h?GE]ӪU$~zYLcy/[6$'\y)M_ƨ8cB95qKD;/1Y'&9SfX'YsfgDom;x"hU'[41Si\Dma8fadA3ޣ.3bV2MU"ݏH;Zu'0ZaψgAt&!JbKaSFp `Uogﳯ毎쇿RaSuPujv\*NOJ9 º q%,g%Y'{0I3" <+ݚoOs>ɖd=zx5U攁c(_͗D EaVW|,[J)|5IR4x`TKiҖ~/M ܩFjj!&)'#='k(}h)6$I2޽_m{֍6pqRA!ՓNL6Ԧ;^ϵ7ԣZM\j<#ßÙ` g7`%0"$v¡GSNGe ͊؅IIƏ@=sYeP"p$ҔMh1UٶlorM6$=9XR=1=+@~ne[!yMS9]63ܛCdb Cj:Z$e=NSeDk$DGW4[T8_-1EAq dI5Z%q+ө5m!@2-'q< q 33 R&db ɴJ ^R^x}!4 = \Wo=$6yR8$JP܄Jeb^Ld-AkXT³kFe}X{+2ೲ@$J+ݭt"@G,4侂biH\ah@Gi` tۛj"R \ ۶Z^]T?,%m+5qY/5ʺ c~n?9"+!^t|vMfmGr Dr>rnL@|H^0AzvX:'$3Tw2 3 W}~}Hx^5j PU39AF$5oҍMr$mHEȽ;Q dh=j7{Ar&/=Ȫj Q+gFcF'v? <2ِu~OSsJHD{10A痼aIIZTvu0D8H*6[vf/ cK9$dcdI6nsu䰫K[#sGMpjЎ`‡IMQH#cUiʳ}%Y=JUꄰBW#:HumqI^m+ZȾѢm%3+8t#p뻒u%R=7>ͰH ]{7Rb}8YW'C̾ I>1Fk9y,_"77cw]l1h=tUF_\QE=3+]s<)UqK݇0Thv4:ÈeFA +;;<-B3KEfz.>E'$KKp:P]Tx$Y<:-\q}$,+41=02r~ u3S$aﺐ(_qP:їAٞ |"xukL℘9lVKk xٶj #Zm:8H 5QbMhIoȃ4vHhM0i@z7؀k.F G/?RAlItRҭI(mzJSe6RUܾKS4.+a$r*uf4㍧OD_ V mV%v)$.E^8, ] nMA{6m^f׺۶wSYK{A#)XJ[%sT/f Bo-UƤEn{< ~ \vÎqݼMD؃"7}ڿ]#p/4 t#mTMv);bdeWi3 !Tu`Y6 vDvyMvMg ~#eӶT߂ơ ޮ;,Ѣ-}w Y =R yӮ5ڍ`q8& H\- FXĀ`2CZ-nj'3^."SPr*' RR2nqm+c4a?pZ3Ngûs3nMM!7J.gLtYaQwJ8iՙ>7endstream endobj 484 0 obj << /Filter /FlateDecode /Length 1957 >> stream x\sFxRs&I8ӌۙ&n&3c07/{+NtNt{`g`uE,Qc\\;GG'd!)s.nc8^e/98V5|T(ŀ$/#acrѨESK'"NH=yعoh9}~k²n6t|`'@\&f۔iN }.^&%Egwoi)/u"y{'|J?$N WM-d^3XJK{A"ȏu[ ~kɞ.pO0CA߃w/7ap%r+*KBU 5!DDв!3X,-b~lNA= JȶrΛrc~ب΃0!j3RbZ",L|l@$5(. Bb~#׷Y"$e69o2Aފ5+Y.zyթ($8a>!j} K QnD#(Ĵc _xұkVÃ(z4*a>'p X6 v'oNZDWM-Df;D-bSWm]OEe6%h %{ؼQpxE Wnj.gnV t19nǑ_ۃ5s"{k.}rk5T2j@I7v F&tC't;K&DӳI5'{}7V{Y;;L8,jwb,dԇQC \(J>|THh[mNd>EǻBB:<"r"z`Z|w`4cgM f"a+ycC˜ʼN&b~Jx2[XXnd*_+P:YU-7\r(^Q{@wjB=Yӗ?:_3'EN꽄QLZ:aW `eCSUkپty J P|(Rhf)a r wPP4V\I椨PNfG.[xe*Ti*w҂梂B\9/FAPvX0Ʌf c|K|]ꝗf59^͵RevSj-2H q>-nIc3m}](ؔ/ZoV>dX|otvhy/&\KM8ꄏǹ-ƴKĮ쓚UXXWSendstream endobj 485 0 obj << /Filter /FlateDecode /Length 3993 >> stream x\۶~tA>fN8fjLyHIQJn^J5*l!*cG7wJ_t'OˬY0yz]f]Oz}w6/lwu9._Wxs Kw9^g޺>÷x{췃Ͻ#N)SM!d=y/q 99N$qs˕)Ѣh'9_0U9zCV,7W/$GX eOeKKFRah-׀,'KHx0XKQ*ϔ/ F?9KǓ;#I{KRw%>/'t&18ݻM $L5S;Ijs~TĦdI#^9+|'2GVfEaY;W:' V2{֛gg5Q{wjSqr>G<-cU*p/ELkZGXC;Q2+ƯW3{./|I |kwQlO_} ϓnVfFeγ:.r^ʵߐ|A3Z㐰.пeoB8Zt)l{b? ΑT, 2LGKoWlem"n97̴ 8p RHrYO6*EleEQ>ȩA)!g-P>e {SgRHW?d8jl&*Zwk2q빣k@če>1Pr^}jVkp]Dv۬`r\1hbrL*Pj\oAL$c̱82хt;zzԼrߴcZQeȊA> օP]qДw.S'ntolPHh22r\87q70A aq33s"Q&yJC{poIWl(mXz~hLTn|լ EQdYoym+ߵP. 6Gp`&-m hQ`i0OܬTP@`H麚^&Qy|ToLQ٪gBDݾ$ G ej`Z T*R: j,0^kg1Ё6BL6Yִyj];K G L.qN@A&oL&Xf@{> c? 쓡'tvz矂EuLw8="FSM ,<7(\Y'0/좝kzך׭cp\Eaʮ6@(#BxthqS u4MNgagY)|i poT_x):Ũwnrey5uJ&X tMIQ9vMnȌ}):d[ro6)ʣW2&>8.̪2?)"-Rt r(RVv7Z!nY( a)۲'(Y7p@ NyF Û;&mi[Duw!CWB!ChF[x)NX{{jzrW!Tb=|m;do|2TivCb{F\92"Ju Pr51߻5q\C: 0.x;zW{!/l I`ƻl f HNka3u7g:yVga-(DL="itV)Ci`Z"'c]GӀƧ3`%kn1 mޚ,Ufq[cs>Hsn<&8 ^6f壥*aDg$Ҭ=:k`ެ|mV^j`wt,$KC߲«[gwQ}'_[Vp$ۆM9n99^ Ϧ6D_qjj&0wQASHo LIؾtyf%EBC.vBHzlp XV=`NIq}sEt޻_@P.fP &KaJB\;NgSRSVB$k_+FԴLi9|laoX+z VI%6.QzB385X71!a>PpشjY,"3~Y<]BU)Es,k%lag&G5,5B|5e37 {`. 
8V9@l/A<3A :8@u5Ad5Dc+*pMK rXV ȭ;y|MT ]s1VWb̃fl"%GΫq8js~ gȥhUnI[LFbo=FO9iCXڿY0ϵ‹a=qfA?2g\P6Yw=@~UbF &8],|Vl+S[|L7t@8~w>+y Y|q宏W"4d-cY=~yJG6#[̟1R \rm~? _Vʣ C]fqk/9??endstream endobj 486 0 obj << /Filter /FlateDecode /Length 2175 >> stream xڥn6_a - *^$Y6 ] Ck hAĈ.e7˾}FrdR(GY4'Nv025;iBYafj!x3WA* ct:{K^A~n \/gGg:UODQGVD8)fqza-L.X@/( ~iO8\oi %Ϸ?U1R@Ȳ`AlP HWN1X6qʀy/t[7s$[ "];%H{[p;A:nknx+;˷l.^m$jIn[s,lyu778beXe,1zM) ̧ʁ&ZV'vP&m7S.32`ΖHzF%,e ,VxMv.^#fR#PJa*5ځ.0JMQ{LW0Euȃ+Oa \y1kCc` Ŕ 7'3*311N0؎ci0o IS<]1C/;\0k0E`LB8$LcܝT.8]OeK[H+_zK{ɇrjEr/ G` 1=!vBl[N"7qܒt,䅠 HP"vǩ՚$8AvwC2 \TWSi8h G;[brqa9ݐO9gcpd^b5Dε $KD)r^b>0 j{pǫtd6<޺8wpgݚ(%ţx? -Ŝ:ή%h*yW8cүWkav7_ZL6aO$X7.8[O( )6t@!^2#'LpyPU|*Es'ن+UI팊61NW" JY,N)EOKZbK*[2'h=$d8V9PAn?C˔NU1(M?%W_M{؊# 9)#5X['rP.N,XLI/9~5S{I6};] OkMf%aA]ÞrCHvq,B#CiP y[_t:Dw5}s:6]kA V{u1QR\\@=KBRMWihXu.x)ͥr35R|GP`p'`Jm7BVs&Α| *^39E'vYLꇘ51̺߹Ca̬3+~D*K_*"s>%g/. !< V< T | ]rht7ngPh Sy<@]9h|-hFo-arki$ lPs<ǵSV {'}46ssFlE$qĖ FD~!ɱ/^>ϮE>8d1rJ^fn8ݽr&4,g0=7/gGۤO1rҡ}XlFLO6od\\J'|i1m{nE-6yKebYaxtEOleO;% ;ۇfSE_T6R$@ũE)%ΫRRfO7J%>sx@5#IصcQ/W#DcߨKA%z$W\λr㾣ʍ m,eoƣ֌:w[VIxi++a'Zp8yd )&Kzˤ{r۳' ?endstream endobj 487 0 obj << /Filter /FlateDecode /Length 3688 >> stream xڭ[ݏ6_ayXI{i>Eq=(]kmM7_(nyКpf8[6rՓ/_?g,|Yfv7j\UW}ͧj@eѼjy{8^-vw+,K(nYoV7s!t{sϲr[ßv<y-#kfN)r+J{wٹ;l9tPUh~CNb_~~'jQk3KxSNJk~~+K5%IJ c(g 0MQ{;[(SԕپV:; 8^p{枫eK[aP4I}__)Dko);vj^jqo[[kТ[@_eF+n!m=1]}G_=u2UFhLBɠ!# Ū~zsh⬉sQ8VH.FhYF-c>%Fd0+֏`LO訰!##0R)V1be#vH}}#.T&rnT*!*CMa0fĜ&&AuцEt+/5h 3_j¦6gI4jEhdX)4XwP~vC;/tsD] ^{Rg/.:*%ĺ51v+TԈh OU\ ٞ0F.CLv0u{.RJ%0O 2g@ہd-e B 9?`ѪGf9PYx{' UG~~.mt'{"=,[B 8nC+=RTUHؔp&tG$ˌmMX(ֶǀs1ۨU?o575Kp*bۧH2kCIK21L-y&kݖ:wҬ.$ m%h'ˬ(jB-:LKȮnT:km}F6\y$&ӒKT+%4 %`㥕/");/rkA/2M%/w:-8RPˀW>|SxpIҿ݉wˀiMt5WG~obcXcC Jȶ<.5܌2bJ*|`n-sIc [nLd"* ݑϪd^cAAg@> ]}HH[osڱuѦ۱B)-#e'n<=`zXqp$ybdR[ӽT_$R؉.h9@hmP$ELn6k*T W'm O*vҎ!_k"mVȾkXy͕Au1XpY[M^bLnQTnHއdxb`e\}cwuc<#̡=hS e#p0'lneaRZ1YhAl,Lq`0 RdPyߧIdR<#wCǮ ܌!xLCOz]OP΂qFݷ ,A%-Hʬ8 ӌulҫ ~ !k0vk}b儱[[?`cqSIVgO`ҥ҃!ҡ߄r i)!5eN; 41"15Ap]q0NXP Md`os94@ I>$]{ND;K^LXü0qÉʐ_ދ(^F- 3\}mC*$G- ]9XcVrN(ԥFδE8-sTu`?飇XSJq7[.n8tWlT2+:>eJ$-mvc[짜r%I|2q1< E;Gdn\JLJϚ0yTuQ!vdfrL|<f} M0aq2 CfׂUnx=WO{Vfv5Y}:YsϷa\'%nK+t?l3{4F+cnfx=2b$1X>&{- o%?,n_'oiB_7~OQv<ݾ{cwSz|I|/,UR&N!A]<"O|;:<ҏ:> pq㚐Ű"LbkvݻX Kʵ$"<mr7_when5!SHxp(z0k'|)$w@'`I!ڡb}]k1*Axtk{갳WaTB6&?É-o5|x_Y?&Y!qjbήF uBȇm+tHeF`_Ƈ$`9D[)q1ܦSlJ> BUcYqG;%5OF'/؁Km)=:'peܲ |,K r5JwAǞt'endstream endobj 488 0 obj << /Filter /FlateDecode /Length 2921 >> stream xks6?Q@c&\w3MLf|`$Vb(%ubid'J\, Hz?xJLf;>VBKDdaϼӉ |$CwQC;@||>õ)cMu>yws P$*B#gG)R%μ;`'EgZx яGVy}kO>,mqBL,sX˒֣Gv>m@W|p]6uόҩvB,#9P!79tGYk(O &[3@82K]d(&:kA1<{R 6G,UzrRɤ3 ]V=b*4Im rQZM wc3$1=e&uq8*#$0KM/VtHjO*S/: EnPbj.&l8q{xA`򴻀o8vm8P@$Z 0g"9ӍMق0u"]ΙLI8w` pAaD&j[:vnAOP~ZbJs&4:1|%",b?"N)| Zp AW<+D~ jN *dl^4 |- $"QTC+I CXB=FE RV.הlđO”KO7(|en&Q"v[5Ȁ$%{.D74a v`Z+',D9!$5(KD>m814ێl9pz$% JcGؐn0JƁJeK5w%ɆFZ)LG 0=sJ & +^s-߫cIJ%H2qHIT?Kb%&3%AI)bِf5oȰTy7ظJu.>cs&Yzye[sQTk|]Xi&=db"]~)6GH)w~8)Ԡ. 9 "t ә%Oně~MdFM&u&o:Ðd'mh3mi.(1O+bHI6R3yK ԦUa$ri5}y0 $-dAo͝-X %qxaDqD,?XuC졈ta[^ƻ6qƣiC(3'QtłAc2Tدn9nJN牝)|A:A'*Y?Ҫ:詅 F V\!Ʉnr85IzT*hVmq@B8>"1θHد׊ GQ*; [6d7#d.ϭaQU.*?E"x+q:pkO ?>_ຜ4T5+M>ę].ʕ&Y sdbt&3>"URdGx+P!"!u$?,'>w˾VniYVGFl#:b/+(2NZ/[Z?mCA_. 
S#ܾ,ʪz?uCj8 j&;u owt\iEĩ0WOP\ E->LU?)'t"}C-+P3kR99gXѬ)$#Ъ;E8:8EHkGknZ^ ٖ2F /|3UiolK<8ea Ӕ> bǯބ!iᶟT MZP9f:.:Cri *NY"Q7OLW5#R$\#A|<:OU7 -ہbVO;!W2~Xhi {{݌#Xw7K׫Yү.‚߅mjOj=0=uq1j6Ƽ-n>#ek4*Na@w2t$~{:}^vq ;_c̚gBQ=qX Jba|k^ZpRŗ MRb5ƭW6Hda CJh^w7 }%FPMGi*v׀Px[yOC𿽩)Poֶ0uU n`endstream endobj 489 0 obj << /Filter /FlateDecode /Length 3675 >> stream x[s/:!|x}̸I&㦉q=Ù/cEO'FgX0+^}75ړ_ ${=cᨓhLjw>aK۲im'bk/{'&DӴ^ 5X5<̲~?rfs=%^w?jFͧpW[V-&#sY}D7ǛR*I(]hg-i:R>uJZH[*7dV[ϸ}%w2n{gYSs}M־!1ʼڤzfus{i} 7kg?zjV!P[Tt&JP k,Yf-Z64 xz[0U._^0$908s T]Pp="7bs{`B7'*2G" *3"+ #{Zť % eɤ[ Fi|S'y7fv2Ȏd|Iar$EoW.'&ye:5Q) XXҲ#K#Y׀1DpYyKSl~ms"꺷^t:^/,$M`ƫ FL2٣. Gʤ^z/DVr{=CQ$]hEA:"HqIS/1TF*{ \B9sũyR}]څ/,}wn) $cJYЉ[SY:L7D^n}$HVd-N0pRZlآ̜|Y{XK :$梸;JWgP>:/A۞. Q&W&3}Mj |݆*D: ,'Y,T`Kfۈ^}א^'Nq$W%߭dLHU!Ppq.x i"5D#WL$%/2Lfآs8/xh܍Bqn\2kwq cU@.yq<ݝ VIh8۪ ]t .cF"(;@.8XHkÁva/Rș 2SJ .ꇱRaQUET8[8x,mv>9\̰d"7MC~EzaRv<2J\Aܷ 0.KF9%,; (HTC<9lnA+_UVveEqx o! 2E>شBn\CbpEG si$- O;G^I\P^%L!E ٤_͊r]JJSb|}RExV cEٯA]" 2D&wTϓsYYd)IÚ C\wgqͅO}`D\K4裨# q$ʲQIh[+#u(8,:]hT/*#Tw{5gZ*+S'\9BP.+)>T, :y/uBWA7QfXhj' /فCQ^  V܂`O\FFsn)m"fJF0M?#?If(޲~}=c%̔"ʾ̿f (Dj.DwJjA8@E2vw \*Zo0i ^@OqrߐQdv=EtAA%WfMc!G YUÂ\{H; E3AQ*xTs[S"`y,Wwzz!{p=`vspIBu,u1@-:򸫈vKKFS S\9QiO?AWx.C0LB 8ҷ ~Xk#ts?*D4hi)m{#q]lJظʔPJ~Vt҇)х~mݺcs뽯mk-ﺩ'B w߫=(k[rUĶp_7{B}$kX v# |$]c'KyQo;m]]*mBI8]2X*v8L\Lyקa0> stream x\[S8~ϯci%Ym;Pe;e;m;;iH([h t;o_]"_Ã-tt$A\XDad7_ QcGwg5\1Cb|) $pj{*T@ZߠGEN0E,lq#cSGAV'2Rr`knFiOy@ * qk C.?K#k/ʑf!duM OH%opMAp&ڑ]-dYl?AV[>{ 5~vh x iBǤ_m*NUR.ǮWb6_.8Xzn:2#IG~E߭ؤuz ?[v[sY1l lS f\T_"a۟|1o5\ũ'0brm_6YQ:o$𞟾/󬂴dݚbޞk2y- XRǕo\MﭛJ/UJ랝hT[;b QԥoPZ,RJ$_ VBRb\\i_ůh 1Du}#W {NP z.*+nطo@i Y,[DUs*+-g%w+S~- [[E"_X#I)tH$mvEn2߰2[Oz̯o|f zΚY9a6yODVmAMtJ&H-^ Qe$6vɳQRmFNxnDW|f;h-U#a/i4쫒\YZ޴g{`ׯ'9»3 >S=tCͽ4+!}"ߔ"b"Nvv t3v<)\ă"#4SlC vxwSY\bW{ݸ=3GzJ(\ydO# YtIJ#Dԋ~<ݸ8a>!qOt}1@Ceq)`Ɨ`p(j8 Il1' 砄S8 ev(*2 ռsB}ILK ǀ?Cc.nfq'E[@vR֮ !z.hnn3C*ma8%6wD43MmYYX{&m2<|,lܻ+ 8ą))q #3t@SJV9,Pendstream endobj 491 0 obj << /Filter /FlateDecode /Length 1345 >> stream xYK60r Dmd[4K֖cA{9^t9EQ7/rH֨Z?=YzÖLeq.U7hn~vN sIT,W펔"*fm -iN #+t,oS\~1qW6+Op^wz眢2Y* +ołW8|r\W̸I}[*.3`XpdKoٻ˲\x5ƨc4/nB tsLT{.FPf; \w h%^9ucׁ5nTJ=wOvr8K2z-*۽0e8;(I!_=R㔴Ȑ4֜ydeدIPwe%)X/qWVFnqy &N8@hVZ7#P٨3h&߽2`PcAs;ZSŶk[צhfBrYX^})J[5B/#'>q)[xޢ+oz:}R>}'UǹOPgj:V:\MSyq8=s)X*YH}<ǸJ>YUF1`EhDKOF =J&vDY??yendstream endobj 492 0 obj << /Filter /FlateDecode /Length 97 >> stream x31ӳP0P0T06P0P05WH1* ̡2ɹ\N\ \@a.}O_T.}gC.}hCX.OȠl\=%endstream endobj 493 0 obj << /Filter /FlateDecode /Length1 1484 /Length2 6302 /Length3 0 /Length 7297 >> stream xڍtTZ6)H!Hw 030 9t7J ( 7zkfgV&]^y;-DC$Zʦ@ !+!;Nj Ax@0PD@@HtL D0 (J$I jhaBVE/D_w ` B:B\;A.8 ACG$MۛG8pHG>~Ih\! p{7.P0NAuMY/|U n /@:*|H$Ex /d&nP #nH>/ʠYfwuS" `\g{eao{Bԕp! H(m` Pnp7=Z$jA<@^ @o+B `#(?a_k#> _YfLչHTPP^A QPXD &&w!]O#+YfH/ 8L'ߵhBqU:;RtqsE?p+m]O$z a7jA젞#Aq9sPN;嘿Ff =^/P@0􀁝/ږ!z~2 5h"%D_3z%@imd? Dp;"@`gGB~€`_8Ak؟JāW KƑ@ pTScxY=oލ &DOR4κOWdkrP;mxQO:./OmtO -ޑo'k(o݆٣)N[Hy=4XMf#w F!eӬ93̷\>OS佽axM T2_L<[6{@gNK}Lf 5]dh)FU0.BC ~UQHU 1J}۹w\e>v$!ūuyKYD]5S1w`IsxI6O+u 9Qĵ+Xb*:朊X!Oqzzy? /[<#Uj!+^ĻI#/PK] !#>JU:*< :9+ nʶsQu bW S8T3["@j܄e!1coRCW\.Ӹ=H]0<*QN=EH 粍z||zI˩[W+HҶz*f3~X4ZtqVxɸN| \a9Wj}6ogH.j1g_45᷄?MQF'yRK[Gc~j2  p-78 e_ 󓹻u[T{*ۋwKC"%.ùV?ɜ{t%ײmINHX0>Kx]4l"Iq۫a RRID9+ݓUdrp!V}#U0.> yAftB=H΀ݡ(*A;twW'5uR֯*' h&pF70cW@ū+/(å8s1,옖#nI?qm58cڸt~%;GX> : z/syYJYbOw]/w -wz-md=f?qKj7{j  ۷PTEu =Zr<:"s\-"y.팢Խ8o76[ȝ):&iUYW<: .cGlר&_w# I a*iV_aQ H-%[\X/ {m1GcP$v]5Nke.bc&Uůd%%O3$z]uB8Y|w@)&%v,iۜPZycvT.,EŒ DJZ2g(?] 
"2yH~Pֺ֤ $bD>ԏD^׻:tYSKP:b7 ZGl#zmZj;2ejJhqLRDr>= ~[Ͷ͛ġ& Y"$6}ч.~]'$H{4Ѓ,;rVw0 ZOs YY4 źk]vr]DwLw*Sr]҂P@@GrJ5u/*lIlMDlz2Z)Vm$,Ͻv B_~Ԫx#": xڞf@th#)ٴY4ʔ]h:ߟ vB) M7:X*;I?][z5:ΉH[*%D@3Խ9KD|&(u`^:؀ǫNtq30+ՂԻ{A=_ҧ1RxgEc~%o4t0 TE~d@iHFeD>{(_dU }qH8~_Y_aBrLKg4tbԖgK2.*a[o5Z_g0V&u{~!94I0˿λ =H'T*jgz}'0`o91<i;`~@u%0wUa=jhKb#.mփz*;6ۅ\{@O~6~ U# $baL|vt1 l7ý mUKо2x/oH=co; n6#bT^5@?> IR_"X߰E[ǩdZ|ʬJA>5'wc{$T֎ۿ6aaĠL1 F[~Jl`iՍyiQX'cl-6v?0}}fH֤BR8Cd,yg:X~ZT½;Q534 zk <̩ &.yt9tԆZRF}ADNVĭ︊z:;HA9|W(͕01B6;=rxx~(tؗ RywFF`P ] }n+ˬڛ(f=WQƘ7 C1#Ͻ{F@)g>v]5ej}h땭 -WϸN6=)Ze8d%~3r`zz^nI=Lˍ22!ے¶oޗX,7jخy.me&F8ӻ0-G W8`iI0m4wO^$^sBF|"wq=8<W{f: --߫&nQ4RmQz)5_sа٭;w,{"DCs锋éSzWZpJ 2nZX7Thu0-&39CIJen 4Mg Ͻ9bV|+)#`exrHZǣoI;MϬ&vWqq@I N-*O {|$R|F )Tr7#|$#2͢ /=:j( 2I f1^і{'{#dqhSZ<ѕߚXy(bpm>ZdT[Rz5/$ˈ&kY.uaR]U (Bh1SpdCOd1\b8*^sp%q̎[2bP*i.Lc r68]qG$nc'dGJ^H^^beI%F)\>[J@2}ɋKaԠܬZ#۞lK+r/<~YMgxcqNm5tX%!&'mC<zNmS/m0sv=#P~>ES+z/Xw ܐNOS3_GčܝVe|g0|"$b pXӗHjkVk'T=u4㘏bƇ;é \4%c gFJ[̺;'4}KIVڅMQ?JMN~fK2S;Pt{=vrۍAボD oxlQ |[&QPj9˾d3e5ofݒ[o0O?x)H**(y L 1>!,`ڸ.Ks=:Y7R]~R.Quߒ#Z |! '7eJY r%f6,5Mtލ[> Ն)κktc|&]+ױq U̿"7GxD\A,9./:u#0)HWuEu0޼a~y^n3?1~uxQjl]GMB7dH'8!]擼<L?K 63yt&e9g1;1>]guIӦ,4D/Cyz Wth=y;i8nG͘O5C5Nn\Az^"E33[.Cv-Z<Ȧ֎Gד|7ȴ&!E'rNrԄDx_^5A|gwdď6QssN׆[L轥Oq1SżUHvd>7"]Liw3sf<,Mε*h/oBRj(qq@e#C] lyxRҊ8zj3쮲\"U1v9c:%aXf*aC'RE8$w94*=[ﶱv-|E>>%}}^BI:oHTe)5b vh,Pz Q,g]L]Y=LNR03,cokeqWb.RdŜtBX*t5nV`k@̍n-cZqN',MX(e^U=!endstream endobj 494 0 obj << /Filter /FlateDecode /Length1 1528 /Length2 7340 /Length3 0 /Length 8374 >> stream xڍT]-4RE@${"%$B $wiҔ^Dz"E^RHƷ|~ֽ+k%3g=g=ag}d'CU.>0?H@ A~HqQhE EbJ th;XT@( h4.p4 "` Ey;Q( Ý+B!N$x+=*zzzCH /p2@ 5~v=i,]w0P]$kI90??G0 E:B\.v[ŏ .Dx@N,!y=_(+͏F8; v]`Hgg M>% 7utAzm"\`ۀ\npu8X俘 n{CoWNoۃ+`mcH|8ro P np!ov, ƞ? 0a~dU _G |e篖TP@z|| $$>;#:bN^F]_"kBNJҟAPYw"w'??~3/Vh#T !ܝ׫`A+i>?HOVAxa7=pN#$F@N{AcGAءw.P$  (ě{XK Ǝ) @~$EH~8agoDD}a7?t]{@X]absZX a`¶,mq^p($*PrR%דouP`7ToQyj) ՌftXҡMG$k~J-KʺMэDc8&) $Qq1GS>S=A["֢^UJI7}LMo/BcD꼼)zJYI;Ktu-C,ˤ$tx6EvKj_U-Dz}- VΤvQ{$Sf*YZ4TNZkV cqg:Bo̎a3#Cݡ W+?z؆6?JZQ{ŵo9{6n"`.fb(S4+dJ%(HH0sG*Wknj[eK,ub˂ܳ]0S4:FyȯkBYƧYx{qy Rz-*49]pJtr>w#[pI&,djrHګ1>#Ig0xAOz7i;i OZ|$ckEbU,\/%>V|yOzBЋ916 /| aBV ˛4Գ݃g]?Lk\_EzN՞X*u8 򜓮m^Q9&dgdX$aByg/g'%y|n4!w)[(qYǪ5 jRr‡'㥟m65H9h^<cslt:g:pfL_ Yd=M3zauɉ|>2kc~Ŗ0vH-g$}}Eg; 243+u9_z;/ǖ$H"evIđLtۍCs"0k3y!E0gk#I\Ubcw^u-[B)^e6-ʌs+_+:#VA_ DQGݴT*ړnumB0EzY"W2$SM&yfC2o4kV$$2. 
#L4ϼ  2Aj2 Jg+e: >3TpZDb`AA!s:[PdH׵.gT;ﯕj~OEz3jo rwEyݩʾ/i7x'Z_3ce\E7.`H{@$hQ!Yӎg¼75تn|/4A+*|l3lhY9}m0=6 2IWtތ ,^;sgXۍFVs}#7b UtY,'yҩ#P1&ɻ;;-EDm4[Tݤ%2]..q?:*8*>O|![23~\jC4|>@'ebsqPT ]Q09?z7kt.@[$$lN䬦^(Srlyה՝XtpN^sܼUõcDlpbq%s7{{b>Ӈ_Zu$ﯤ$.ndPT-3]kOysG]~xzAH{q틠+"v3OjPX[J\}&{óƃ(GThg7*ؿ^7kt#yys^:DiiZ&K+ʭۺ9t7Ztzn&P(g3S#+W6OJH1EriZ3Ar̂*"É z.$jcֈ]e_6+3b6c'rh;'^D[/G8T.ζ AvCVm mu)1|r juW4!mF.5!`Ex쁘$Y`~/4>sf^^|]֔MX+TZbe$sAk]D񵨪Ow%(No٢g~+xk8d !fK 5_ 8V)- ˁW cUQƥndeIo(']X|j-s%PHаI)ex{:y㔋[]Eu{e鞐g (aB?y 9W͉RI&MFxq7ȻeCZLȹ3?y;{K(%Rzc$q|/#sS@t7dnn6gE8m5;Wi|QU6ʿ6R8$kޞ-L iU?DA/Psˢ-r׫hPq#)p X̵fj'jSZƦߢMe(EZsZVtMnFG-ϯ|6b6פ`+73rzn\-EC}HCwv`S^:)F+BlohlUׂ*J28E77_ګ#A "׉<OH-)xb:ʌ޽h_%pPY D.'RtMg" 6ѐ?O.*uE7ҍ~ǯiz_W^c[6ݷ@B0$fԋ4!hîڐՉ=bxp*}:h*?*soɋT3 !Z*hAg˔e7Pj6܅bv&F|3ł$bƢXbyۚ$Lo_+0;"N- &qX *XJ M㫂jQSeRQᾺu.:^Fq{BLBTۦt#7@hX@`D܃6b,Xj>o8[ԑÔ{mM'퇉 MQ9ݭ2n)&pPaO+Z{J--Q*&qt>B]\p/ X1V$y*7|Y@W,eSHꊩy;?Z14ѵgbJNi#CNy*5xySd7Џ!4iOլmi4aXjT5!abՒX+-ׂDFj ;xc#!OhHvke\߆}|6(m]&J|'+X7ug%?N8~^Uq1^aGHupk~L!hɏ^g-~z'rY|ӰyqX(Q>ֲ8d>)B%3*g}Π目3 .xW8` XyASfwqB{[Cg}"&[$qgʷ՛vENx ;"b3#vo"֮YN_rU:$>Pl="bU( c½I _Joz&F}&1 ,JZHC9 o#uBK/}Ҡ#Fk":XGHVS|rli?(ඹ51KlHS_%8FVJOp2Q}Q7p$d7GVc"Yê=*[׌b=5<-IBIoZGbHmWx{|P=S8>PWqOu:bnsoB הxˢ~3@cJrhSEl31+O"#rO̾4M#|_Td [1=I}!{8ei@6%tp{A4̞i:1:Do+g6{^)wg WqR7IUÌG&u0zخx ڟȷ;mÀxo4DSDFˣ{˦8zSΆnO2j5"Y>Eˋyws=& |B%2ꑟɘ]f픖3ܗ8 ̒ho+H1]6LpR!U|8[445v1n61+ )UE"~Ve^xf5L}LU7R~&αڃ2}!^"OzƔVmA7P~+HkiXJx+L cM77:柷ȩLuÀswvKQ3W%=!1#UkbB/řir%00Qk(+IS|-6ObN!w14fBm!r2]\_۾^W޳3?Phe\EK;j,m;pjiSZ1HҾ-VkT"h$Q;^yVals>li-3@Ԟle$[PrW痕HV(4WȲ ;JO+ZP ]KCZၰq ~X Za9K=nM|?,sei 0oYYK :sHhe;܈S/!UoQ̡=Sݰu bLh"y7(0A=J}PђMGK<ԂWAy1Ϛ W%Hg豙b=v̬{P6*l^+eM ^o䁕-Y> stream xڍt4$jD;3 `F{/щAHB^ zщssZYoiqKZ!,r8H* @~ Yru3B]0\]'Aq8@E@E@(/ E qYTyJ8,ptآf ?t,!p*e uDh qh!,aP?J٢PN"<G$);hBP75#d<m[_ ar@h G3\VPp @ Vr'C,-N' n9@r*<(8 |@~wIj t9;O X,Q h7^ `DsM/+?5PT73cRR77 @ 1eA`'WnEӿZv`M[(?,7SoUnHwwCahֺ PEuojUV0W* h%Hml aH9 ei3:C!_t_1,$B$ZmkeCzgpK/'>zh jMm/B3.#_&^?NtuqA,e>ğBX7UKRsK?Thz%;f5\&7 3kQr>Y>^}T)pHnEɭ$S)wzRt"%.]hZĭl t$<({ҤZ]FX&E:kc\jqDZX9~{ROS(XVFFJw*[+kEC.bjREvuJ$LhԻJ-2kOrh-V$b 2@dĝ=bOrWt4θpMdoF YwXi1)~\}Ƭ94q< s['ejgʍ9B2Be. V2T2L&қ N7'L:OG'fӊٲbN\o7{4-i&g[[YOex:Ō?ȃ*. 6S Mz)ߴM(E;?q r>Bp+fś]"0֙{mKz5`3beI N{V /d]ZH`:2b[EfaTJ"YLDEȰ 䌾}1Y|s?S껻{ Pl;fsղMև݄ψ5Pέ O-L";χ˝\\! 48>ƁQtUO_R SNTXTV҈8w۳ny똷Dʮ:̘sd~` a)H:NbS\f wϻLrP uXUu9t[Uޭis)Kl:fjXT߬Y~Mj^3h>= WO0`(  Űb .6VlH0s[I379ĥSqa+ U{ccipB.̺VCJyݩ닐լj[& 8m.nV39}/ǵ8lJ_'ƥ'k=؄f˖)b=G^{[Sv ^!z9Wk1Z-p P5`oꈲ8TRѾMe!6H`Sb u.R(Tnb$HY%"~U/?GC^nm^ ֌ۍ~?9r>Re*cC,jAiMS_nhn._]mSD$ *OH} 2^V,2.YXCO|Cl*~%Xm\Qbٙ(Vi 'F̞-)E($tq+/bl+$(!_*9veɱֺ A ϖkKU*7ZEփ,jg[2/)U΅#n=QLNɵc)u|/vS˘"/2AC /Yd]MԊ4X"Z" fn9H0 rKKH;KtElJ\ u_!9[")w}L<4Q/^"U>m^.H#c$;7?6.%Xq-`cp6(;C__%FIMF ¶\d ĨzKL(N)W&}3$0HÓ H:@DZ2ݿkQkyq3:O͚sGV肵z-wˍ9HQ숈C3F3u[pfIdtQ`,S,o: \ 8]o:Q;fV}fM59F'4gʞ)XQ~);EZm-[4!7FXp\p3+> Lca=TN\FՔ)>ñL#X&q,f *>^,&Tlb ʰpV}ޕ)x؜4#m[ *21L%mudfVճƷY, okpތL:;|BHٓ۹mWp-7 b (;wmd_}EnSrHyD`JO`|5;j8qRGqIƧz2k'Mp{Ҕ) Dˏ iȂY9$ɠ .ǻo'eze? 
PFKSD8J6"%43R>:2x X[-8 9RHPN{!6cݘ7}rQ9V\T^{qh3uL L"<]ZFu4{{Н տ f cCҸ-TCⒾ%*3˻d?jݏO]yh3`!eX!E!w&u+y\9΂PK?G~l$`IyqKVEYq_6Ƀ tifku9f8o'hׯ,t 2~JV<~b[͸11|mۣ%nNa%uFSL BvA{5Ynqc`^@X !aOxy9SfZh'a9z'PKbD?oqw[dkX$<~#W-})̂gv_t0Ťߓ岌.ýDe`U wAm5^ϝtFjuzdD)i^gܱ ~MZ1YO_(Lk-<@Z0b 듴0Ru"`݁+p.MEhW Xv甇DPIauB^<&}2gu/ wÈF;3Q̻Gtd;I?<)\:zޱ5ōX"%͵kD&r24Ol޹rުeAzTɅi*zP+skQWs( =Y=?r3~d=g*mdnڿVWRr35++9"CajG^'qh>LOH,Ln$rle^MkcS,OZo|[fZ|m2+*ӍT1xAPnJ'Q-LRMS񑼲7r+wzG0=!J!6yVпlT Uߍ*^)lG:8xAѾHzPrFDJ!-̲TL<VD&EDO'Z-y+Ո"Cu?;!߻URA!ÍHg]V8o)f:[+u+!&~7RPo]Q_9>P:N.:ZMJ-HaB1lͥe%7꼽"-daR SV5cV^1T[ ďHMO"QWIk`wdNwϑsO7w<7̶cnc}X:,vڕsl7wc :Pv`bclk:DrF!aendstream endobj 496 0 obj << /Filter /FlateDecode /Length1 1530 /Length2 9293 /Length3 0 /Length 10303 >> stream xڍwPo.],%Ͳ4HHwwR]RR(%ҍHJw#%?;gٙu?םufQ[G p44TdA|>>^>>~VV};w8VCB?(rPMpBT=$,Mtrȃ=U' U!ߏv$&&;@j#`w[CFsA}+8 vturxٹtnPWO5wM#xqXvn9ܽPn"|<PWCz:@'t ^?3qrt#|6 RTuvֿ`Ӄ?l[= PZA7wa欀srt"p~'o < : ~@kg"@]Po-w }g.Fv0 pw#`mqXAm8F0Ca yd 2k'_ Tҕ1UNee~< !<<w m_ އA]_2`kI8KAPb7<|%M鿣KR ?`G;_z?B 7:;a!d6 isSZk۹Cl͟vq!1a?;d{`WW%> !a㬡E8?J\q~ߘ('eE?HDT=0A|@C>CC> &ZC?M0?40( r< kY=>2ZS S}̴LF̯%]c >Eqt*ծZ{s ano]'3$7_b⡌`R ࡄdV|hSaq;y:@+g_hwn9fgY4cGV017ֵNxӖ`v+!2>TyrMW {|7؆;F}j'~"QA%*&ӁCEC0W؝FB)%\oN5&ڬl(+u))Q^P I?3n˯A.bPg]( .^GL\<-ԭ(LT,$3}%%wplB%Jhڧ%o»2xwЩ5/op]d@ mCHyIf lz"sH8p4%LB^d>2YTō^&B$w|9iu$j€AVGKixtmv{!louDѫ /3,U:jҰ٠Ee۱_i: :Vy: Hȯ탡죙g)7-LYy>N)i(#S& ?̓]x?K i#֟o"!1[^CkOP=0;S`sC}mݘ[/&Յ,EoB^Ȕ%#BFB 9 HD|φ ,P&β\}3o_5.i,)ޜFt4=%= =9"rAt wEǞy*]=)}pZ8+Ib6g:o: Wx4eODk71sr}3Gl#rZ3e]IQjҒy.;MJQ7v6r;7Ѱ-(QH[47xB_l=yBJ+*PpKvC[_0ua\䖮kZbSJ~v)LAM'Ay|`67܃GkVb'7@=gaĕou:ޓEܩc~2ƃ}txq(}S?1\( ~;y9ӢB5g7Uh!hҥZ@G"mO(U>A~s?}1\1Rf.d$HqB*ܑH^8A.W7Rʐq ] +Ae}e" CqlliRGLgI>Ad'e_%“ ?4F;T2;#mvn8`#70T7"ACDQ%Gm|Z@z'SF.'O~[y#29 Z {T*s-9RrӟCB)݅%R#EA^cMx`s0*4YNKDK'rPVsG/:qwyW.Ӕ}Nmʁ<)mfѲRjGN|=/Oϰ"v6^PDe9D_h$Rr}\R~W5uA ,׿?|1?吖NrG(14΅B6y2-ir`o0#UI GԞũD+rۓY5?.ڢf DТgҽoc1$O,I*LN7ZG92K#LעufeK'`'t\XuBRwH`+ ߟv0P:_gW*?ci'Qߘ]nDK5Syڋ1Oܷ2.iB9d|Z6?xk_|YA0hkJX]A3P nO(vf?i׾uݫcU*Z 8L@|v XS;}|]EG,cSSܯdǍ Fs2Dv4,i:Pt2Y RploU2j}zͦ,Xh3#b{:(s=gbaU[~;9nL̽?}V.5lY+Wͫ1D@%֤'MV4 £!E9ؓf$&Q#7_וZHᥓXNXumooV_*7F-$S-=Q2 C) f+i-ǒdulx lE,bvи}pQ;UPO&)u@ŦFeŶFs'Y2E[@ЇXJN`comC#U{$ Yݻ?^NAYQЕjɄAq#!11oj<4W7;_J{1JF(4Wm85cL2&/vy<<\ls9un}ڙ=j"50q0PFN=TA\nA2LO,iiݍdfapvQ"v|3{E 2vod ssfM5FEn 5 =:DJBVL@&ȼqk7+[ z=uw.ΠRLM)ӈE-BloJMtMxfJKwJ8[C9$Cߤ` 9H*rbgpt*=]!jIs rK xBQ $Bq3UcM7{27s]N$hj{wצQ_*7~{؁:ԺGL3i\jbE˨"Kh?8/m4l`Y$t{SX{:;T^(jȼ$m7%d0~L* ̛ FցrTؔ؞$ L'?fD3[mX4@9L-O`qDM7bTi:0T94DSJBY.3r* k~||X>8A]˛hGz~'&sE]p]uaH 0W g$J>(n5xB(h}C)s>*YyAi:΋'Qp yR>6Gw5)Kމs.EґNG'ZȟV:Z8&(k*ToU0!]ܭ'VTzN}g?Y;#`ٛɺ[`~dKQmd');RΎP*&mR4f[1 !♉~ȶ%0Nް3f trFe?(^svc ${tCN%SE7#7j{b]w.G ߇MGlYD꼼I-X!@BW SKܒ{׫ow{ "wN.f>zN #DGkgͺk^B%/ܹ(QU7̣%P]7B(R| jQ;ɈP{43_7qTqhe~%3mj`izc!= Qh)V5"M;~~=^DZoENj>i%dhg^ы7^,hUЉ̪n. 
b,r#zsl}Of5`TߊpN$G4 ϕրD˯,6_"3(68Cl !g;'ʿ] u8`i '(`L; >-hpnRd?xW2,ZqUT2Š&j-\hN )ܝ̐7 ~:ǿ e91S 7It1"\Z&;Qiu@=З僐%(Hnn6#hXmo87:GD] #[tSOˤ14'۵{ɨh^*떽qt_/ˏ~E8r TDDO׏) *6;$k &jua9t1]Jf; 8Sdž77kVhgNgF[rbOV35!/$glpamN:2Uaә6X٣Ԋ7Yf=ݮw 8xtjK- F[SN<,'A̱NAE2#EKǟ=" F% GbwgPq\@n6hqI+a|JxH3Uw~:qhtXrΣK۳sgsl>kM](~+rGb~c2f}${/޷'Ԯ/Hl/łd_vz~>khL!\N'jjsDN`\s0y@u ×D SF.NMN×1VǢiB8),V|CtI(?᡺ezQ=}S(ܳvMcc^Rc Rn)btxCL9# jsg0rTэZ۹@Sg'mͤEh/KQV@Oܘ 9=}vNɁQ?`0OS+C3x}-IYvK4(Tg0<ׯUy-:nh#o峉u|gL K,ΏfL|ˇJ~21G|[y-Ħze3]&myd;~ag4|o|,Y=tjsynaF+m+Qbr8)'cZY{DS6|1*xr 8nۗM[*?f Ќ>q)cUُVJݭ9d $8`[,e:P/R>ز-7DeiYsg b*[L,w;nhV}K p}WfioR p4!):m.;TSE\#w#!.>ڄżUwOG޳wD"o&1J^%X{мF tv;bI6ېkws,(h23G-295Ϡ+ڤ,kDRrB)cHw̈ٔg)2Q^,rWhߪ{ E\1˗ EJ4Q&pg%CxW0ԓ̯־^}~`뛽i=T#RZ{sm%ַs_t4d2mv1NU6DLg $vJ"ZBpFת=:%~VNdBƔּpAD)XLVWx޺̓RI݉~Ghs3P>A*ͼ-P~HJf>2D3rmvENy',@Bu ~aWqu%Vº鶘KW#rL*M$hK[d{jR%,VΞ]=NDHB~7߾(k rn* S7C̮323'⬓U?RW/-Mc EӶ;+&{fEÄ+uc]we "xe!ۆXkEճL^hO:8h\^aP2;DF<>$ #X~V辑c}a+;nPS:\i̋򉚀+#}W:T$sE"-*~eEU[y#& t(jNm~ɷ)'ڂj+WEMAUnnD=yBˆWW5ctŊ s6^\N;G/h[֏hx5 7m^V!)k;!T97+dKi ߗ:o?ų9XDŽKgyw)26:BErCl>l։~:뀝"#ݸjAn0ʙ[D\vFgMZpɸ{sTNa{oAW@̀ shyBN>7):c̕k,v&K`DKޯ UP;oB잛Yԟ M_etNGsDx IǑ o{l[͗6=+V3Ϙ(K(.N^s#Pϴ2To5ͨ46OVh[ J>}g3 @C=\>ACbA, #J;fh \gޯNmedo rc R>U0ETb8S~Ѝ:OtȈAP<fL񍀏5ɜ{G[n=&s8E~%wܽJn'b18@}wa Cb2'zCF Qpˁm12H~x^6/oG{M@],=d9HF#S6H@7W>o59ENL&zEI]iWpYMˆfp㘎% 6ѠI8{ xfDCR[[QF3:;wƽtCOBZunT}KTޑ+O2M9Nx+gq*%I}R"yl..W+z/G:0[]tIבxy'~&YU%Cc cF|F2Nl`D(+Yi`UDOf[r#ěIzQN`!+ ,-ZX O7+Ѳa,Vh,=j128~g{3Vs ٰlo-oun3@o܍7`ހ-fu#OGvQ"%I"Җa\r g~^9Fѳ4I;Q^y4{;זV܅X_y_I;t=V9nG\]o> stream xڍTTkAPnfhPRPRA$$E)%EPBFqsϹw{׬ͷ~v|paF J(v?aw Zp2cVE+H |*O8 {~".Ae9 ?װr1A"<*0X>G8 /Igc?w CP;]pcHPo8 Z $ձno{_] ~ ?oVX٣~]XOMSH (_@H $*DA)K?Ct@@bꯙB,R@ +*'=W7ġnW?,u0X蠰b@j ]01 +%#B qa Aoԛ+ G?04 _1`.دK!(8̯{iñ HD%$POO) U="Hp@yg(@2#miH'?`k[X|ߋStF.2{/*+1cf]Ōb=#0YB`7/8D#]joC;2l7+1 z,箪hAMT$q~A G\T<ܥG0+︒'r+Y]_1uZ)ZSL'M7}Q 4ج4>߮z-(QMˆ^p|d2U)(O%oQ,:9kYS$L2{^UU>RSfl*r%e:D}]e5>$}VӃQ v޵ѐ=f܏>?Wq=3MRM eos.oIePՏ<=q#mHgl\$hѝkP'23qKmt1$LY@!12.3.|ҋ&_~3[Ʋ]ZDIWy?6_6]%]܈@R Mf# m]X eE1~@=(Bђz܊;"fVK}vj>y򣫠 { ѴiȨA|j/c}LA{[Տb^$03K#֢y/UG_MB<3lqe^~ž^껭NjSZоN2SunizTzy\yȣВ?􀔓pėJ>UFZ9v$lW*b(\/Ϯ^0)Uo.沦==<` ,=R>r=uk$_R'ڑɶAO^R;nI#ZU޸m&TxH Xۗ* dDL6ߎk&Zk||"ӗ9jKt|4aL&5O MeF\h{REѺS>c岙dI=a?1S =RH%i!2xTCJF k*Y>OSaF~͒A+EIDz2sabhys|ھ6܏h sFɣ9hKyjnFYW@hǩ9EjaH|cGTD)_z7Y"{d_T)<d++,K]/đwb!pd#6usTLJ,hsY7t" bv%ܴ~ZnrDJ* &[OmGtFyH% {O8d /k[+/~c=]%9R$~yuŤޭ׫ǿNwR0|Ε>c~|wa| mRKx',iP:bZ l 8:wDŽE+b/_FF#+ ϊB=,PiFYZKWR:1O辥mhf*Iv]ܤ[9D/5lS;EVUMÍ"wkGD k..m6cyR%0۔8CMD q0{&Ys2z#LSnjszf}IF Su55Wf\'溁ףQ¡r2ͫ6(v6Z+ƀˁesF6c21?7Sv'(ѹXj:u;2*[]~k_ E9zD|\@$3ʈp* 44~LHY{0[M}#{fQ8VI|Ƶz/Ƃc 2ewYcՙ^}̦4Ko~lFNec9A!]ILXĻc,QOG! w%FރxǾpՄ&KvUY!m>#<%ӕ c-yѫu*@"d IQhಕtqqvw']QSgBnS >g}[-40TjkS+|p2%Ӽ/NpY€tjYB)s[ɍ gx a;~ &Һ.j~yt/L+륅~g#77*ou7w ϸh5ȇ<Hўy\`o7m}8{YEnhwzmu@K&1,3%}5oc%RƹVQQ+<\3Yﲄ?jcZ{-p3yJA4t?䉓u 'B1GTj59Xjdבd'ٚcnwh:]9 nbٱo/")38 vQd(#5s>uv4܈}e"e!\+*Aa=MAjvIzikuEljӖi:g҅+jp}Jī|]~aY|`@DC2Ӭ%IÎEpÝlKnį ?0wb-YgzJGY(pYބY~Pc.V9Ozd }ь1Iqg)cKxo]JՌUb:eî͆Ī0u՘=>4[9eh`}c\hIϝFR+StS߃ЏXF}hdJ(NSdL!]NPLA¦=3]%u$B>ƿd%{dt>߀u}KtSQIeBOW*󁋻ĚċDršBs(k~J~x[UKr$sSaEnXi/ 973[7ɘKa1qV{uC[%ʅ8y4-炓SI.CXM]P{(ZןЄ~񶹊vd{bS9ď^\&$ q>"GB̮ͽv[íBw늭hS8?X⪆1<^mi!m _5-n^6>1ˏ 0nO|-|7d l}fE|W.黇)<23C=Lf qwlHрsh 2Ӿs+`|XÁ[]o 78%ѕ&p.p@cendstream endobj 498 0 obj << /Filter /FlateDecode /Length1 1390 /Length2 6200 /Length3 0 /Length 7148 >> stream xڍtTk/!Ȁt"ݝ" 3- "J#)- zs޻ֽkϳ{g~35!<|9 ]Q?3>1]\!p˹AN@iaU7(( E||bqAk@1\ vdlVp#b4@;# Ѓ[A`{l@8zxx]y.Oع@ vq[~ 9ƃ зQm 0B0Wd@詨?] ;_ѿA`AVVpG' @`:9pd<,et |MjqBB&y 0k9#p՟<lu/ku=`>6ͯݜx `g7_H?:[0 '"`O+;_|N`?  
r.n`?m!V%';R ##7!}2Cbz{J*ZƜ$+ p @>>, _]44";߹4HĂlܔO w ݿ?(AlzeG ľ>V-K*B<"k]!7lHVY9  W$H n]B b$"ih `/ @l.x)(E>Kdo0/[We+7$~#$=Vxp+U/+dh<7%'7auiq$Nf/xr*ۉϷ$+kݱ&/=#TwrKo8972g;j璝{t)yVwNol _sDL2XfNQ1b!p8H=&ON'Hߎѫ&p1^揽^(wmf6C?!c~zƧ`eЊ+ t{CSe9 85/:JTRrcC"`W%O" \Z;-HEבXWaO))5e~RvwHg(Y{K̰~]I~/OT^uJ(y&hj_SO;wjHӔ7B݆LiEQLfR°g].!\5W2W+;RĚ#'R[O3Z5c$DY-pb;ۓ,{ !{Le b+캮ŕ`Evûru#L: sBf%֙\-G#U1D!Xhn3V-6"듁?g$oz:pY<)(IE^R؂jh{kiģ@}ؖjtgC"! [MS9o5N?c|3J_s}b5q\Ծ՛Zeb~G.4q]k资|Qb6U#ٱobsUG`$7}b\ܖ bOE2gv$߼VhM=0wlG\FN, &mQ2`Mj BfMe\=^ 8AóßnM PݽuQ!`|JR6:S+r18iN pc>JS$0oD[GI&O iy=3_Vy5Rm!}i6Ő3F[ڭnZCs3^a"猣h/7QbE*nve24WL7 |Sϧ1-%-c\b^jt-1{{]=?`we1ۋld(|v7JMe7?P;u=m"gEڇ}j G>`v.>GU6ڊc{t̮/*ޯq:ڧRSמj7+;ȇēwuaG{2i=^LL(\ ?9tԠ ./ /|'ipr/5dZ[½;Socw\`ޫhf#r14fi͑U3p2ml ;'2JY kЩg?8R~8 8%^1 cJ tr7#mYTs I'{(X>G9v98b аz6R|iqYѥ{1aPzF.\Ի4iAļ8biwF~v`{ W$ƴ]Ee9FPd:L$4'|js1NJ;f% x[3\PV(;XKE`ս" oG(ZirSPߤ)&HX0~r#;X  >Sa'BzSga0JWqrőϹ)K!Y{ěz˫@'УKթX{ l㬜Wn}^U8+{oc6"R~l?0c׉!a 8o R%MFrWUIޠ)< #-N㔆U5p1ltxh">:gpŕT±HnZnDRł5m1Giq=eh&ǽ`j  9(fqie֠3PC&,f}WSs=[w8myth s}%=U55U8huPf܃gӡ97ƴc!֝bk9#- ,,K)C$WdJ3J+x"jNՏ8b9%}4|F| o/@WmDyqŀۧX̹1:iۥH xe{q_rՀ}b%,4#kMЎP"K=KUyi%!*Bsm5 CxBFasawLUXч&6{/M\PTy?i2:=8Tcļxzv0֚ k# [(,?-49lP-H:`da@ ,%=aatq'B=8Z=T34POG!P~s5E"x\/-Z7pRGEhHeJmUFz% v8們ݛM-k=A VS'r)@%xrKĕK=US3D D3Sⴚ`N~0Nܽ tQkFWN+T> gFҾ+$nǧG7cYk`wtV#&땢`B \>σ\4nXFS n)E'r{ksJDhPyc=Z [* KSQbᖁV1I|w|TSPښg1UPXԤ/MLbv%@?aE_kBPqZl-~vᥞ.TGO#q&m(њqrpA6s9K`$n~˙o?FC_(o1) *V"4~XX룩A DǸ][;N/{\y+hoiģYϴR75Ž !qdq4 mgͷ͸:wZ.w*Z}d _|2"߉E|x_&'a2j?G1mǨyO^H,J־|V* a!JFX*{ 쯦2eafblTlK =Y72U66b_wjew ;E_=s+>~d|pI]H0;jM'Qlʑk}l(}1sKOv[Պ̠DA넟Ϩ7ϔR>=E| `196f uW *gUxu~p!+vOS$LoPQ=]g$%1[Dš l11A~[ :6{US.|$>QQDiTlc:Ef>Ces"] ָ $^vE=y _5zҎ%^rWRkMwo Q#/R]Ƒ oz޸g <qͥN SvhO+ٮ| X:[^=>RNe9S=.PΛ!y<̹摑kH(pX}]_EYf0}˲ma#U.%YxKCVmKNdtW::@Tjō_΀] d.T]H|qeamLf۞_G4[kڄdӗ >jbN:[08q{2LfAN0X(5E1ѡȊaqK X}Q٨xh8 Ňr%dVM9ѳjnޗzEU|D̉fU{RwӐ? ΜT>$~dMMiXXo20/8LJsIh."MPwHB6U|B-J&8[9}'x!:ԫ?;KL^ %Q5W{'@31k^ 0KkXT?ڭE"6Z}L r+3WKBA|w.ȣ@XoĴEmjĞLUSJ;@,6FvJ= D^9 (&t]x W*кPKۯW9"LPendstream endobj 499 0 obj << /Filter /FlateDecode /Length1 1394 /Length2 5993 /Length3 0 /Length 6951 >> stream xڍuTo6RictJ#- ۀlS@AAZBQPJx{=;gקu}nh"G?@Fp¢ PMR`17) ,_jԡ8>qwED`,/G4F@ Ю__@>?PTVVZw8PA (>pWA&hG >UNDu{E~!'4F`k` d 7pP 0 pG 8D[h@q {7@Q .P7eC:#@8/r:cx }w9x[w<, taAXE~߲ vqApXԑ "6B{|(ݯ!"f(;B[ G`Y)) Y 9Jom'uEC v @qw6‘0';F9㗏Az!` y ;{((r?4x597ã9K^ϊqUGJYEd|aQG:V6X=Dz[~ tg׫uyˍ=gJˆ؛ktq6xbIIu(CVc?h~QY[RJ3ϳoﻋo5x:f= w迚z[U-^y͏MDS wL] {v鞈lFaPpy{sebBfH:UhZ"TI||T_ | jwx-Ƴ;,0$!QKQAfC\log;j՟D|+3$Dsf㧏5~Д@ܷPsQ:gDIeQ JEkI!ӡ%qSnjK siT>b}u1'Ur9q @yZv^ 57uiߪ4PEsg!Q|D~0[ek-/m~D,BCK4]8|>Y.<{o"g[As4'*&LB!xF0㙆{)מM9\3RXb .ӷa\I|W-He[A+F, ̵py)]ߨfP:.).7h'S^)&j]βye*؛ءn'w%O{ŠEiԛ#:SL>μL֊`7(k6,}7gR%hf*h9qۜ/O+ŸWɵu?]|QOm3 yGŅ"MZ2υ~؄F[ V0#\p#fەALU,WwS39~=&Y;ei.arQoeÛ="հ}ĥoec< N&{݌+r5 Vc; ŐD(9J6; _xTKY8'jtr!3Ab|QS甹^Nmnp##O@#]2&I KC~+L?tDOW|}i:,H!rCStv݄0<Ns]"s,2%pWTڄ'Qί{ D]{5 o!6<ǗTaupEOv25@&NbV4O2ٕ7I93 4⥽Σhe|nqZQΒ6/AEY|K]IQ秛ȟBmHZٟPEQ0.7qwAD X!.w~-6>ݲ8a_rEvyY,$#)C9+re3,0oi4C|Ê gМn\2Q,u X(I-qD-R姅7G#oc'%*|o&Оϟa,k-$W~f&I*5 LR^,cxF(܋^x)>+w4zZAGi2հ"mMv{ -Q9FA4 | O_ i&eCP!,jzgj *j}'ځw>G;,Թf"?T٢ %GZ2(ܤ03#w)t5EqB5mOq<4(FsEϭ˶ <y>y1SΥg\pW'/s`PpTR[T]i](// %&Qwd]jeYΖҵ(R~]bNy[^bn褼.A=T^b:z<҈"WէK/_,&~{ RYt]aS.ȟ@d@f.ARl!MJწͨ/QYǏ]b4>$x8)Sn'չUhL'*mfG( Fa"LC Ǎ71r"<,UYOqr" 7]/ v#gd7C,Tr yɏ"b^'*oACJ8g@>o!Ms%!++]B^ ]Yhh7sg}, wɗqy.&+w. 
ڴʭM KMl C9qZhC qb~~R'=VSJY`'9L8+,amti.5nQroJ';7 gO[(},f`hct1%w9|T YhhtV5X5T+Tƽdoq&%M H{0 э-~i}dcR]دjysCL-׏%YMc0 t&X[G~(eZVR讉(v:+DfTV eJz{DPhg9 @vnҨPzC)}ZST t2$g9z0wx)]ho^NX5iCUH;}սYqH@A9Ъwv]Euh,`YeП{Kl|/3MeO1Xh A+ iot6u9e<:n3A.uڛ]eDxON\ ѻ%^`^ΙDCܳU/=VZ<ת랸3AgYͥZd'%&n*i}Y!QT[eLߎIoMR p/g0?[jcJbL68E`7Ig^ ړiTep/*+t̃Q2|7%r8PU zsVywvl˂V"H ^(- >0oS][]sM='(8LSѤu,f3@OɃnh1 X0SJ$S:ᇗGVk{y}yg `4,t7TU}km%6gr_0oy|˷ciZ!^U~: V/T,P@*w^-`_L,Gq{hE%{>i#R\Tn/Ԅ$J\xqYoG-U꘼Ӑ9TjF'[H6lг%)&9-q+'m6R~hoڲQr} j ^C{~V8ki1> /W [ 1 3 1 ] /Info 91 0 R /Root 90 0 R /Size 501 /ID [<9f5fecc440a02584667977ae79306320>] >> stream xcb&F~0 $8J(@683NFID_m90\4M!$bT_ɻ D_ˁ$Z[D&I8- "}c@d4*QnĚꘞ˥2FS( Jy@ (UpSDJH5'i U "Ux*#%|0 2g#}lx"HΓ Xf0{)XM'd+A,f eKX09c7q endstream endobj startxref 347443 %%EOF RcppEigen/inst/doc/RcppEigen-Introduction.R0000644000176200001440000000072713236220366020337 0ustar liggesusers### R code from vignette source 'RcppEigen-Introduction.Rnw' ################################################### ### code chunk number 1: RcppEigen-Introduction.Rnw:8-13 ################################################### pkgVersion <- packageDescription("RcppEigen")$Version pkgDate <- packageDescription("RcppEigen")$Date prettyDate <- format(Sys.Date(), "%B %e, %Y") #require("RcppEigen") #eigenVersion <- paste(unlist(.Call("eigen_version", FALSE)), collapse=".") RcppEigen/inst/doc/unitTests/0000755000176200001440000000000012253717461015662 5ustar liggesusersRcppEigen/inst/doc/unitTests/RcppEigen-unitTests.Rnw0000644000176200001440000000000012253717461022214 0ustar liggesusersRcppEigen/inst/doc/unitTests/RcppEigen-unitTests.R0000644000176200001440000000150712253717461021664 0ustar liggesuserspkg <- "RcppEigen" # load this package require( pkg, character.only = TRUE ) #load RUnit runit <- "RUnit" ; require( runit, character.only = TRUE ) if( file.exists( "unitTests-results" ) ){ unlink("unitTests-results", recursive = TRUE ) } dir.create( "unitTests-results" ) path <- system.file("unitTests", package = pkg) testSuite <- defineTestSuite(name=paste(pkg, "unit testing"), dirs = path) tests <- runTestSuite(testSuite) printHTMLProtocol(tests, fileName= sprintf( "unitTests-results/%s-unitTests.html" , pkg ) ) printTextProtocol(tests, fileName= sprintf( "unitTests-results/%s-unitTests.txt" , pkg ) ) if( file.exists( "/tmp" ) ){ file.copy( sprintf( "unitTests-results/%s-unitTests.txt" , pkg ) , "/tmp", overwrite = TRUE ) file.copy( sprintf( "unitTests-results/%s-unitTests.html", pkg ) , "/tmp", overwrite = TRUE ) } RcppEigen/inst/unitTests/0000755000176200001440000000000013100733213015076 5ustar liggesusersRcppEigen/inst/unitTests/runit.fastLm.R0000644000176200001440000000621612565743640017637 0ustar liggesusers#!/usr/bin/r -t # # Copyright (C) 2011 - 2015 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with RcppEigen. If not, see . 
.setUp <- function(){ suppressMessages(require(datasets)) suppressMessages(require(RcppEigen)) } test.fastLm <- function() { data(trees, package="datasets") flm0 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 0L, PACKAGE="RcppEigen") flm1 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 1L, PACKAGE="RcppEigen") flm2 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 2L, PACKAGE="RcppEigen") flm3 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 3L, PACKAGE="RcppEigen") flm4 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 4L, PACKAGE="RcppEigen") flm5 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 5L, PACKAGE="RcppEigen") fit <- lm(log(Volume) ~ log(Girth), data=trees) fitCoef <- unname(coef(fit)) fitStdErr <- unname(coef(summary(fit))[, "Std. Error", drop = TRUE]) checkEquals(flm0$coefficients, fitCoef, msg="fastLm0.coef") checkEquals(flm0$se, fitStdErr, msg="fastLm0.stderr") checkEquals(flm1$coefficients, fitCoef, msg="fastLm1.coef") checkEquals(flm1$se, fitStdErr, msg="fastLm1.stderr") checkEquals(flm2$coefficients, fitCoef, msg="fastLm2.coef") checkEquals(flm2$se, fitStdErr, msg="fastLm2.stderr") checkEquals(flm3$coefficients, fitCoef, msg="fastLm3.coef") checkEquals(flm3$se, fitStdErr, msg="fastLm3.stderr") checkEquals(flm4$coefficients, fitCoef, msg="fastLm0.coef") checkEquals(flm4$se, fitStdErr, msg="fastLm0.stderr") checkEquals(flm5$coefficients, fitCoef, msg="fastLm0.coef") checkEquals(flm5$se, fitStdErr, msg="fastLm0.stderr") } test.fastLm.formula <- function() { data(trees, package="datasets") flm <- fastLm(log(Volume) ~ log(Girth), data=trees) fit <- lm(log(Volume) ~ log(Girth), data=trees) checkEquals(flm$coefficients, coef(fit), msg="fastLm.formula.coef") checkEquals(as.numeric(flm$se), as.numeric(coef(summary(fit))[,2]), msg="fastLm.formula.stderr") } RcppEigen/inst/unitTests/runit.transform.R0000644000176200001440000000547712446006401020415 0ustar liggesusers# # Copyright (C) 2012 - 2013 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen. # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Rcpp. If not, see . 
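## [Editor's note: illustrative sketch, not part of the original runit.transform.R.
## The inline C++ compiled below exposes Eigen's componentwise Array methods; in
## base-R terms the equivalences checked by the tests are abs() ~ abs(x),
## abs2() ~ x * x, exp() ~ exp(x) and cos() ~ cos(x). A plain-R reference
## (not run by the suite) is:]
example.array.reference <- function(x = rnorm(5L)) {
    list(abs = abs(x), abs2 = x * x, exp = exp(x), cos = cos(x))
}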
incl <- ' typedef Eigen::ArrayXd Ar1; typedef Eigen::Map MAr1; typedef Eigen::ArrayXXd Ar2; typedef Eigen::Map MAr2; typedef Eigen::MatrixXd Mat; typedef Eigen::Map MMat; typedef Eigen::VectorXd Vec; typedef Eigen::Map MVec; ' definitions <- list( "ar1_unbounded" = list(signature(x_="numeric"), ' MAr1 x(as(x_)); return List::create(Named("abs", x.abs()), Named("abs2", x.abs2()), Named("exp", x.exp()), Named("cos", x.cos())); '), "ar2_unbounded" = list(signature(X_="matrix"), ' MAr2 X(as(X_)); return List::create(Named("abs", X.abs()), Named("abs2", X.abs2()), Named("exp", X.exp()), Named("cos", X.cos())); ') ) .setUp <- function() { suppressMessages(require(inline)) suppressMessages(require(RcppEigen)) cxxargs <- ifelse(Rcpp:::capabilities()[["initializer lists"]], "-std=c++0x","") tests <- ".rcppeigen.trans" if( ! exists( tests, globalenv() )) { fun <- RcppEigen:::compile_unit_tests(definitions, includes=incl, cxxargs = cxxargs) names(fun) <- names(definitions) assign(tests, fun, globalenv()) } } test.transformationAr1 <- function() { set.seed(1234321) x <- rnorm(10L) res <- .rcppeigen.trans$ar1_unbounded(x) checkEquals(res$abs, abs(x)) checkEquals(res$abs2, x * x) checkEquals(res$exp, exp(x)) checkEquals(res$cos, cos(x)) } test.transformationAr2 <- function() { set.seed(1234321) X <- matrix(rnorm(100L), nrow = 10, ncol = 10) res <- .rcppeigen.trans$ar2_unbounded(X) checkEquals(res$abs, abs(X)) checkEquals(res$abs2, X * X) checkEquals(res$exp, exp(X)) checkEquals(res$cos, cos(X)) } RcppEigen/inst/unitTests/runit.solutions.R0000644000176200001440000000642712254726265020453 0ustar liggesusers# # Copyright (C) 2012 - 2013 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen. # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Rcpp. If not, see . incl <- ' typedef Eigen::ArrayXd Ar1; typedef Eigen::Map MAr1; typedef Eigen::ArrayXXd Ar2; typedef Eigen::Map MAr2; typedef Eigen::MatrixXd Mat; typedef Eigen::Map MMat; typedef Eigen::VectorXd Vec; typedef Eigen::Map MVec; typedef Eigen::PartialPivLU PPLU; typedef Eigen::ColPivHouseholderQR CPQR; ' definitions <- list( "dense_PPLU" = list(signature(A_="matrix", b_="numeric"), ' MMat A(as(A_)); MVec b(as(b_)); PPLU lu(A); Mat Ainv(lu.inverse()); Vec x(lu.solve(b)); return List::create(Named("A", A), Named("Ainv", Ainv), Named("b", b), Named("x", x)); '), "dense_CPQR" = list(signature(A_="matrix", b_="numeric"), ' MMat A(as(A_)); MVec b(as(b_)); CPQR qr(A); Mat Ainv(qr.inverse()); Vec x(qr.solve(b)); return List::create(Named("Ainv", Ainv), Named("x", x)); ') ) .setUp <- function() { suppressMessages(require(inline)) suppressMessages(require(RcppEigen)) cxxargs <- ifelse(Rcpp:::capabilities()[["initializer lists"]], "-std=c++0x","") tests <- ".rcppeigen.solve" if( ! 
exists( tests, globalenv() )) { fun <- RcppEigen:::compile_unit_tests(definitions, includes=incl, cxxargs = cxxargs) names(fun) <- names(definitions) assign( tests, fun, globalenv() ) } } test.smallDense <- function() { A <- matrix(c(1,2,3,4), nrow=2L) B <- matrix(c(5,6,7,8), nrow=2L) b <- c(1,1) ## solutions to dense systems res <- .rcppeigen.solve$dense_PPLU(A, b) checkEquals(res$Ainv, solve(A)) checkEquals(res$x, solve(A, b)) res <- .rcppeigen.solve$dense_CPQR(A, b) checkEquals(res$Ainv, solve(A)) checkEquals(res$x, solve(A, b)) } test.largeDense <- function() { set.seed(1234321) N <- 100L AA <- matrix(rnorm(N * N), nrow=N) bb <- rnorm(N) res <- .rcppeigen.solve$dense_PPLU(AA, bb) checkEquals(res$Ainv, solve(AA)) checkEquals(res$x, solve(AA, bb)) res <- .rcppeigen.solve$dense_CPQR(AA, bb) checkEquals(res$Ainv, solve(AA)) checkEquals(res$x, solve(AA, bb)) } RcppEigen/inst/unitTests/runit.wrap.R0000644000176200001440000003105512446006401017342 0ustar liggesusers# # Copyright (C) 2012 - 2013 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen. # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Rcpp. If not, see . incl <- ' // double typedef Eigen::ArrayXd Ar1; typedef Eigen::Map MAr1; typedef Eigen::ArrayXXd Ar2; typedef Eigen::Map MAr2; typedef Eigen::MatrixXd Mat; typedef Eigen::Map MMat; typedef Eigen::VectorXd Vec; typedef Eigen::Map MVec; // integer typedef Eigen::ArrayXi iAr1; typedef Eigen::Map MiAr1; typedef Eigen::ArrayXXi iAr2; typedef Eigen::Map MiAr2; typedef Eigen::MatrixXi iMat; typedef Eigen::Map MiMat; typedef Eigen::VectorXi iVec; typedef Eigen::Map MiVec; // unsigned integer typedef Eigen::Array uiAr1; typedef Eigen::Map MuiAr1; typedef Eigen::Array uiAr2; typedef Eigen::Map MuiAr2; typedef Eigen::Matrix uiMat; typedef Eigen::Map MuiMat; typedef Eigen::Matrix uiVec; typedef Eigen::Map MuiVec; // float typedef Eigen::ArrayXf fAr1; typedef Eigen::Map MfAr1; typedef Eigen::ArrayXXf fAr2; typedef Eigen::Map MfAr2; typedef Eigen::MatrixXf fMat; typedef Eigen::Map MfMat; typedef Eigen::VectorXf fVec; typedef Eigen::Map MfVec; // complex double typedef Eigen::ArrayXcd cdAr1; typedef Eigen::Map McdAr1; typedef Eigen::ArrayXXcd cdAr2; typedef Eigen::Map McdAr2; typedef Eigen::MatrixXcd cdMat; typedef Eigen::Map McdMat; typedef Eigen::VectorXcd cdVec; typedef Eigen::Map McdVec; ' definitions <- list( "wrap_vectors" = list(signature(), ' List vecs = List::create( _["Vec"] = cdVec::Zero(5), _["Vec"] = Vec::Zero(5), _["Vec"] = fVec::Zero(5), _["Vec"] = iVec::Zero(5), _["Vec"] = uiVec::Zero(5) ); // A VectorX behaves as a matrix with one column but is converted to // a vector object in R, not a matrix of one column. The distinction is // that VectorX objects are defined at compile time to have one column, // whereas a MatrixX has a dynamic number of columns that is set to 1 // during execution of the code. A MatrixX object can be resized to have // a different number of columns. A VectorX object cannot. 
List cols = List::create( _["Col"] = cdMat::Zero(5, 1), _["Col"] = Mat::Zero(5, 1), _["Col"] = fMat::Zero(5, 1), _["Col"] = iMat::Zero(5, 1), _["Col"] = uiMat::Zero(5, 1) ); List rows = List::create( _["Row"] = Eigen::RowVectorXcd::Zero(5), _["Row"] = Eigen::RowVectorXd::Zero(5), _["Row"] = Eigen::RowVectorXf::Zero(5), _["Row"] = Eigen::RowVectorXi::Zero(5), _["Row"] = Eigen::Matrix::Zero(5) ); List matrices = List::create( _["Mat"] = cdMat::Identity(3, 3), _["Mat"] = Mat::Identity(3, 3), _["Mat"] = fMat::Identity(3, 3), _["Mat"] = iMat::Identity(3, 3), _["Mat"] = uiMat::Identity(3, 3) ); // ArrayXX objects have the same structure as matrices but allow // componentwise arithmetic. A * B is matrix multiplication for // matrices and componentwise multiplication for arrays. List arrays2 = List::create( _["Arr2"] = cdAr2::Zero(3, 3), _["Arr2"] = Ar2::Zero(3, 3), _["Arr2"] = fAr2::Zero(3, 3), _["Arr2"] = iAr2::Zero(3, 3), _["Arr2"] = uiAr2::Zero(3, 3) ); // ArrayX objects have the same structure as VectorX objects // but allow componentwise arithmetic, including functions like exp, log, // sqrt, ... List arrays1 = List::create( _["Arr1"] = cdAr1::Zero(5), _["Arr1"] = Ar1::Zero(5), _["Arr1"] = fAr1::Zero(5), _["Arr1"] = iAr1::Zero(5), _["Arr1"] = uiAr1::Zero(5) ); List operations = List::create( _["Op_seq"] = Eigen::ArrayXd::LinSpaced(6, 1, 10), // arguments are length.out, start, end _["Op_log"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).log(), _["Op_exp"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).exp(), _["Op_sqrt"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).sqrt(), _["Op_cos"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).cos() ); List output = List::create( _["vectors : VectorX"] = vecs, _["matrices : MatrixX"] = matrices, _["rows : RowVectorX"] = rows, _["columns : MatrixX"] = cols, _["arrays2d : ArrayXX"] = arrays2, _["arrays1d : ArrayX"] = arrays1, _["operations : ArrayXd"] = operations ); return output; '), "as_Vec" = list(signature(input_ = "list"), ' List input(input_) ; // Column vector iVec m1 = input[0] ; /* implicit as */ Vec m2 = input[1] ; /* implicit as */ uiVec m3 = input[0] ; /* implicit as */ fVec m4 = input[1] ; /* implicit as */ // Row vector Eigen::Matrix m5 = input[0] ; /* implicit as */ Eigen::Matrix m6 = input[1] ; /* implicit as */ Eigen::Matrix m7 = input[0] ; /* implicit as */ Eigen::Matrix m8 = input[1] ; /* implicit as */ // Mapped vector MiVec m9 = input[0] ; /* implicit as */ MVec m10 = input[1] ; /* implicit as */ List res = List::create(m1.sum(), m2.sum(), m3.sum(), m4.sum(), m5.sum(), m6.sum(), m7.sum(), m8.sum(), m9.sum(), m10.sum()); return res ; '), "as_Array" = list(signature(input_ = "list"), ' List input(input_) ; // Column array iAr1 m1 = input[0] ; /* implicit as */ Ar1 m2 = input[1] ; /* implicit as */ uiAr1 m3 = input[0] ; /* implicit as */ fAr1 m4 = input[1] ; /* implicit as */ // Row array Eigen::Array m5 = input[0] ; /* implicit as */ Eigen::Array m6 = input[1] ; /* implicit as */ Eigen::Array m7 = input[0] ; /* implicit as */ Eigen::Array m8 = input[1] ; /* implicit as */ // Mapped array MiAr1 m9 = input[0] ; /* implicit as */ MAr1 m10 = input[1] ; /* implicit as */ List res = List::create(m1.sum(), m2.sum(), m3.sum(), m4.sum(), m5.sum(), m6.sum(), m7.sum(), m8.sum(), m9.sum(), m10.sum()); return res ; '), "as_Mat" = list(signature(input_ = "list"), ' List input(input_) ; // Copy to matrix iMat m1 = input[0] ; /* implicit as */ Mat m2 = input[1] ; /* implicit as */ uiMat m3 = input[0] ; /* implicit as */ fMat m4 = input[1] ; /* implicit as */ // Mapped matrix MiMat m5 
= input[0] ; /* implicit as */ MMat m6 = input[1] ; /* implicit as */ List res = List::create(m1.sum(), m2.sum(), m3.sum(), m4.sum(), m5.sum(), m6.sum()); return res ; '), "as_Array2D" = list(signature(input_ = "list"), ' List input(input_) ; // Copy to 2D array iAr2 m1 = input[0] ; /* implicit as */ Ar2 m2 = input[1] ; /* implicit as */ uiAr2 m3 = input[0] ; /* implicit as */ fAr2 m4 = input[1] ; /* implicit as */ // Mapped 2D array MiAr2 m5 = input[0] ; /* implicit as */ MAr2 m6 = input[1] ; /* implicit as */ List res = List::create(m1.sum(), m2.sum(), m3.sum(), m4.sum(), m5.sum(), m6.sum()); return res ; ') ) .setUp <- function() { suppressMessages(require(inline)) suppressMessages(require(RcppEigen)) cxxargs <- ifelse(Rcpp:::capabilities()[["initializer lists"]], "-std=c++0x","") tests <- ".rcppeigen.wrap" if( ! exists( tests, globalenv() )) { fun <- RcppEigen:::compile_unit_tests(definitions, includes=incl, cxxargs = cxxargs) names(fun) <- names(definitions) assign(tests, fun, globalenv()) } } test.wrapVectors <- function() { res <- .rcppeigen.wrap$wrap_vectors() checkEquals(res[[1]][[1]], complex(5)) checkEquals(res[[1]][[2]], double(5)) checkEquals(res[[1]][[3]], double(5)) checkEquals(res[[1]][[4]], integer(5)) checkEquals(res[[1]][[5]], integer(5)) checkEquals(res[[2]][[1]], (1+0i) * diag(nr=3L)) checkEquals(res[[2]][[2]], diag(nr=3L)) checkEquals(res[[2]][[3]], diag(nr=3L)) checkEquals(res[[2]][[4]], matrix(as.integer((diag(nr=3L))),nr=3L)) checkEquals(res[[2]][[5]], matrix(as.integer((diag(nr=3L))),nr=3L)) checkEquals(res[[3]][[1]], matrix(complex(5), nr=1L)) checkEquals(res[[3]][[2]], matrix(numeric(5), nr=1L)) checkEquals(res[[3]][[3]], matrix(numeric(5), nr=1L)) checkEquals(res[[3]][[4]], matrix(integer(5), nr=1L)) checkEquals(res[[3]][[5]], matrix(integer(5), nr=1L)) checkEquals(res[[4]][[1]], as.matrix(complex(5))) checkEquals(res[[4]][[2]], as.matrix(numeric(5))) checkEquals(res[[4]][[3]], as.matrix(numeric(5))) checkEquals(res[[4]][[4]], as.matrix(integer(5))) checkEquals(res[[4]][[5]], as.matrix(integer(5))) checkEquals(res[[5]][[1]], matrix(complex(9L), nc=3L)) checkEquals(res[[5]][[2]], matrix(numeric(9L), nc=3L)) checkEquals(res[[5]][[3]], matrix(numeric(9L), nc=3L)) checkEquals(res[[5]][[4]], matrix(integer(9L), nc=3L)) checkEquals(res[[5]][[5]], matrix(integer(9L), nc=3L)) checkEquals(res[[6]][[1]], complex(5)) checkEquals(res[[6]][[2]], double(5)) checkEquals(res[[6]][[3]], double(5)) checkEquals(res[[6]][[4]], integer(5)) checkEquals(res[[6]][[5]], integer(5)) oneTen <- seq(1, 10, length.out=6L) checkEquals(res[[7]][[1]], oneTen) checkEquals(res[[7]][[2]], log(oneTen)) checkEquals(res[[7]][[3]], exp(oneTen)) checkEquals(res[[7]][[4]], sqrt(oneTen)) checkEquals(res[[7]][[5]], cos(oneTen)) } test.asVec <- function() { res <- .rcppeigen.wrap$as_Vec(list(1:10, as.numeric(1:10))) checkEquals(unlist(res), rep.int(55, 10L)) } test.asArray <- function() { res <- .rcppeigen.wrap$as_Array(list(1:10, as.numeric(1:10))) checkEquals(unlist(res), rep.int(55, 10L)) } test.asMat <- function() { integer_mat <- matrix(as.integer(diag(nrow = 5L))) numeric_mat <- diag(nrow = 5L) res <- .rcppeigen.wrap$as_Mat(list(integer_mat, numeric_mat)) checkEquals(unlist(res), rep.int(5, 6L)) } test.asArray2D <- function() { integer_mat <- matrix(as.integer(diag(nrow = 5L))) numeric_mat <- diag(nrow = 5L) res <- .rcppeigen.wrap$as_Array2D(list(integer_mat, numeric_mat)) checkEquals(unlist(res), rep.int(5, 6L)) } 
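## [Editor's note: illustrative sketch appended after runit.wrap.R; it is not part
## of the original package sources. It demonstrates the point made in the comments
## above: wrap() turns an Eigen VectorXd into a plain R vector, whereas a MatrixXd
## that merely happens to have one column comes back as a one-column R matrix. It
## reuses the inline::cxxfunction()/plugin idiom of the surrounding test files.]
example.vector.vs.column <- function() {
    suppressMessages(require(inline))
    fx <- cxxfunction( , '
        return List::create(_["vec"] = Eigen::VectorXd::Zero(3),
                            _["col"] = Eigen::MatrixXd::Zero(3, 1));
    ', plugin = "RcppEigen")
    res <- fx()
    ## expected: vec_is_matrix = FALSE, col_is_matrix = TRUE
    c(vec_is_matrix = is.matrix(res$vec), col_is_matrix = is.matrix(res$col))
}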
RcppEigen/inst/unitTests/runit.RcppEigen.R0000644000176200001440000002661713100733213020251 0ustar liggesusers#!/usr/bin/r -t # # Copyright (C) 2011 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with RcppEigen. If not, see . .setUp <- function(){ suppressMessages(require(inline)) } test.wrap.R <- function(){ fx <- cxxfunction( , ' List vecs = List::create( _["Vec"] = Eigen::VectorXcd::Zero(5), _["Vec"] = Eigen::VectorXd::Zero(5), _["Vec"] = Eigen::VectorXf::Zero(5), _["Vec"] = Eigen::VectorXi::Zero(5) ); // A VectorX behaves as a matrix with one column but is converted to // a vector object in R, not a matrix of one column. The distinction is // that VectorX objects are defined at compile time to have one column, // whereas a MatrixX has a dynamic number of columns that is set to 1 // during execution of the code. A MatrixX object can be resized to have // a different number of columns. A VectorX object cannot. List cols = List::create( _["Col"] = Eigen::MatrixXcd::Zero(5, 1), _["Col"] = Eigen::MatrixXd::Zero(5, 1), _["Col"] = Eigen::MatrixXf::Zero(5, 1), _["Col"] = Eigen::MatrixXi::Zero(5, 1) ); List rows = List::create( _["Row"] = Eigen::RowVectorXcd::Zero(5), _["Row"] = Eigen::RowVectorXd::Zero(5), _["Row"] = Eigen::RowVectorXf::Zero(5), _["Row"] = Eigen::RowVectorXi::Zero(5) ); List matrices = List::create( _["Mat"] = Eigen::MatrixXcd::Identity(3, 3), _["Mat"] = Eigen::MatrixXd::Identity(3, 3), _["Mat"] = Eigen::MatrixXf::Identity(3, 3), _["Mat"] = Eigen::MatrixXi::Identity(3, 3) ); // ArrayXX objects have the same structure as matrices but allow // componentwise arithmetic. A * B is matrix multiplication for // matrices and componentwise multiplication for arrays. List arrays2 = List::create( _["Arr2"] = Eigen::ArrayXXcd::Zero(3, 3), _["Arr2"] = Eigen::ArrayXXd::Zero(3, 3), _["Arr2"] = Eigen::ArrayXXf::Zero(3, 3), _["Arr2"] = Eigen::ArrayXXi::Zero(3, 3) ); // ArrayX objects have the same structure as VectorX objects // but allow componentwise arithmetic, including functions like exp, log, // sqrt, ... 
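   // (Editor's note, added for clarity: e.g. Eigen::ArrayXd::Zero(5).exp() is the
   // componentwise exponential and round-trips to R as rep(1, 5); on a VectorXd
   // one would write v.array().exp() instead.)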
List arrays1 = List::create( _["Arr1"] = Eigen::ArrayXcd::Zero(5), _["Arr1"] = Eigen::ArrayXd::Zero(5), _["Arr1"] = Eigen::ArrayXf::Zero(5), _["Arr1"] = Eigen::ArrayXi::Zero(5) ); List operations = List::create( _["Op_seq"] = Eigen::ArrayXd::LinSpaced(6, 1, 10), // arguments are length.out, start, end _["Op_log"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).log(), _["Op_exp"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).exp(), _["Op_sqrt"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).sqrt(), _["Op_cos"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).cos() ); List output = List::create( _["vectors : VectorX"] = vecs, _["matrices : MatrixX"] = matrices, _["rows : RowVectorX"] = rows, _["columns : MatrixX"] = cols, _["arrays2d : ArrayXX"] = arrays2, _["arrays1d : ArrayX"] = arrays1, _["operations : ArrayXd"] = operations ); return output ; ' , plugin = "RcppEigen" ) res <- fx() checkEquals( res[["vectors : VectorX"]][["Vec"]], complex(5), msg = "VectorXcd::Zero(5)") checkEquals( res[["vectors : VectorX"]][["Vec"]], double(5), msg = "VectorXd::Zero(5)") checkEquals( res[["vectors : VectorX"]][["Vec"]], double(5), msg = "VectorXf::Zero(5)") checkEquals( res[["vectors : VectorX"]][["Vec"]], integer(5), msg = "VectorXi::Zero(5)") checkEquals( res[["matrices : MatrixX"]][["Mat"]], (1+0i) * diag(nr=3L), msg = "MatrixXcd::Identity(3,3)") checkEquals( res[["matrices : MatrixX"]][["Mat"]], diag(nr=3L), msg = "MatrixXd::Identity(3,3)") checkEquals( res[["matrices : MatrixX"]][["Mat"]], diag(nr=3L), msg = "MatrixXf::Identity(3,3)") checkEquals( res[["matrices : MatrixX"]][["Mat"]], matrix(as.integer((diag(nr=3L))),nr=3L), msg = "MatrixXi::Identity(3,3)") checkEquals( res[["rows : RowVectorX"]][["Row"]], matrix(complex(5), nr=1L), msg = "RowVectorXcd::Zero(5)") checkEquals( res[["rows : RowVectorX"]][["Row"]], matrix(numeric(5), nr=1L), msg = "RowVectorXd::Zero(5)") checkEquals( res[["rows : RowVectorX"]][["Row"]], matrix(numeric(5), nr=1L), msg = "RowVectorXf::Zero(5)") checkEquals( res[["rows : RowVectorX"]][["Row"]], matrix(integer(5), nr=1L), msg = "RowVectorXi::Zero(5)") checkEquals( res[["columns : MatrixX"]][["Col"]], as.matrix(complex(5)), msg = "MatrixXcd::Zero(5, 1)") checkEquals( res[["columns : MatrixX"]][["Col"]], as.matrix(numeric(5)), msg = "MatrixXd::Zero(5, 1)") checkEquals( res[["columns : MatrixX"]][["Col"]], as.matrix(numeric(5)), msg = "MatrixXf::Zero(5, 1)") checkEquals( res[["columns : MatrixX"]][["Col"]], as.matrix(integer(5)), msg = "MatrixXi::Zero(5, 1)") checkEquals( res[["arrays2d : ArrayXX"]][["Arr2"]], matrix(complex(9L), nc=3L), msg = "ArrayXXcd::Zero(3,3)") checkEquals( res[["arrays2d : ArrayXX"]][["Arr2"]], matrix(numeric(9L), nc=3L), msg = "ArrayXXd::Zero(3,3)") checkEquals( res[["arrays2d : ArrayXX"]][["Arr2"]], matrix(numeric(9L), nc=3L), msg = "ArrayXXf::Zero(3,3)") checkEquals( res[["arrays2d : ArrayXX"]][["Arr2"]], matrix(integer(9L), nc=3L), msg = "ArrayXXi::Zero(3,3)") checkEquals( res[["arrays1d : ArrayX"]][["Arr1"]], complex(5), msg = "ArrayXcd::Zero(5)") checkEquals( res[["arrays1d : ArrayX"]][["Arr1"]], double(5), msg = "ArrayXd::Zero(5)") checkEquals( res[["arrays1d : ArrayX"]][["Arr1"]], double(5), msg = "ArrayXf::Zero(5)") checkEquals( res[["arrays1d : ArrayX"]][["Arr1"]], integer(5), msg = "ArrayXi::Zero(5)") oneTen <- seq(1, 10, length.out=6L) checkEquals( res[["operations : ArrayXd"]][["Op_seq"]], oneTen, msg = "Op_seq") checkEquals( res[["operations : ArrayXd"]][["Op_log"]], log(oneTen), msg = "Op_log") checkEquals( res[["operations : ArrayXd"]][["Op_exp"]], exp(oneTen), msg = 
"Op_exp") checkEquals( res[["operations : ArrayXd"]][["Op_sqrt"]], sqrt(oneTen), msg = "Op_sqrt") checkEquals( res[["operations : ArrayXd"]][["Op_cos"]], cos(oneTen), msg = "Op_cos") } test.as.Vec <- function(){ fx <- cxxfunction( signature(input_ = "list" ) , ' List input(input_) ; Eigen::VectorXi m1 = input[0] ; /* implicit as */ Eigen::VectorXd m2 = input[1] ; /* implicit as */ Eigen::Matrix m3 = input[0] ; /* implicit as */ Eigen::VectorXf m4 = input[1] ; /* implicit as */ List res = List::create(m1.sum(), m2.sum(), m3.sum(), m4.sum()); return res ; ', plugin = "RcppEigen" ) res <- fx( list( 1:10, as.numeric(1:10) ) ) checkEquals( unlist( res ), rep(55.0, 4 ), msg = "as" ) } test.as.MVec <- function(){ fx <- cxxfunction( signature(input_ = "list" ) , ' List input(input_) ; const Eigen::Map m1 = input[0] ; // maps share storage and do not allow conversion const Eigen::Map m2 = input[1] ; List res = List::create(m1.sum(), m2.sum()); return res ; ', plugin = "RcppEigen" ) res <- fx( list( 1:10, as.numeric(1:10) ) ) checkEquals( unlist( res ), rep(55.0, 2 ), msg = "as" ) } test.as.MRowVec <- function(){ fx <- cxxfunction( signature(input_ = "list" ) , ' List input(input_) ; const Eigen::Map m1 = input[0] ; // maps share storage and do not allow conversion const Eigen::Map m2 = input[1] ; List res = List::create(m1.sum(), m2.sum()); return res ; ', plugin = "RcppEigen" ) res <- fx( list( 1:10, as.numeric(1:10) ) ) checkEquals( unlist( res ), rep(55.0, 2 ), msg = "as" ) } test.as.MMat <- function(){ fx <- cxxfunction( signature(input_ = "list" ) , ' List input(input_) ; const Eigen::Map m1 = input[0]; // maps share storage and do not allow conversion const Eigen::Map m2 = input[1] ; // FIXME: Write a version of as specifically for complex matrices. // const Eigen::Map m3 = input[2] ; List res = List::create(m1.sum(), m2.sum());//, m3.sum()); return res ; ', plugin = "RcppEigen" ) integer_mat <- matrix(as.integer(diag(nr=4L)), nc=4L) numeric_mat <- diag(nr=5L) complex_mat <- (1+0i) * diag(nr=5L) res <- fx(list(integer_mat, numeric_mat, complex_mat)) checkEquals(unlist(res), c(4L, 5)#, 5+0i) , msg = "as" ) } test.as.MSpMat <- function() { suppressMessages(require("Matrix")) data("KNex", package = "Matrix") fx <- cxxfunction( signature(input_ = "list"), ' List input(input_) ; const Eigen::MappedSparseMatrix m1 = input[0]; // maps share storage and do not allow conversion List res = List::create(_["nnz"] = double(m1.nonZeros()), _["nr"] = double(m1.rows()), _["nc"] = double(m1.cols()), _["inSz"] = double(m1.innerSize()), _["outSz"] = double(m1.outerSize()), _["sum"] = m1.sum()); return res ; ', plugin = "RcppEigen" ) KNX <- KNex[[1]] res <- fx(KNex) checkEquals(unname(unlist(res)), as.numeric(c(nnzero(KNX), nrow(KNX), ncol(KNX), nrow(KNX), ncol(KNX), sum(KNX@x))), msg = "as") } test.as.SpMat <- function() { suppressMessages(require("Matrix")) data("KNex", package = "Matrix") fx <- cxxfunction( signature(input_ = "list"), ' List input(input_) ; const Eigen::SparseMatrix m1 = input[0]; List res = List::create(_["nnz"] = double(m1.nonZeros()), _["nr"] = double(m1.rows()), _["nc"] = double(m1.cols()), _["inSz"] = double(m1.innerSize()), _["outSz"] = double(m1.outerSize()), _["sum"] = m1.sum()); return res ; ', plugin = "RcppEigen" ) KNX <- KNex[[1]] res <- fx(KNex) checkEquals(unname(unlist(res)), as.numeric(c(nnzero(KNX), nrow(KNX), ncol(KNX), nrow(KNX), ncol(KNX), sum(KNX@x))), msg = "as") } RcppEigen/inst/unitTests/runTests.R0000644000176200001440000000700712253717461017073 0ustar liggesusers pkg 
<- "RcppEigen" if ( ! require( "inline", character.only = TRUE, quietly = TRUE ) ){ stop( "The inline package is required to run RcppEigen unit tests" ) } if ( compareVersion( packageDescription( "inline" )[["Version"]], "0.3.5" ) < 0 ){ stop( "RcppEigen unit tests need at least the version 0.3.5 of inline" ) } if (require("RUnit", quietly = TRUE)) { is_local <- function(){ if ( exists( "argv", globalenv() ) && "--local" %in% argv ) return(TRUE) if ( "--local" %in% commandArgs(TRUE) ) return(TRUE) FALSE } if ( is_local() ) path <- getwd() library(package=pkg, character.only = TRUE) if (!(exists("path") && file.exists(path))) path <- system.file("unitTests", package = pkg) ## --- Testing --- ## Define tests testSuite <- defineTestSuite(name=paste(pkg, "unit testing"), dirs = path) if (interactive()) { cat("Now have RUnit Test Suite 'testSuite' for package '", pkg, "' :\n", sep='') str(testSuite) cat('', "Consider doing", "\t tests <- runTestSuite(testSuite)", "\nand later", "\t printTextProtocol(tests)", '', sep="\n") } else { ## run from shell / Rscript / R CMD Batch / ... ## Run tests <- runTestSuite(testSuite) output <- NULL process_args <- function(argv){ if ( !is.null(argv) && length(argv) > 0 ){ rx <- "^--output=(.*)$" g <- grep( rx, argv, value = TRUE ) if ( length(g) ){ sub( rx, "\\1", g[1L] ) } } } # R CMD check uses this if ( exists( "RcppEigen.unit.test.output.dir", globalenv() ) ){ output <- RcppEigen.unit.test.output.dir } else { ## give a chance to the user to customize where he/she wants ## the unit tests results to be stored with the --output= command ## line argument if ( exists( "argv", globalenv() ) ){ ## littler output <- process_args(argv) } else { ## Rscript output <- process_args(commandArgs(TRUE)) } } if( is.null(output) ) { # if it did not work, use parent dir output <- ".." # as BDR does not want /tmp to be used } ## Print results output.txt <- file.path( output, sprintf("%s-unitTests.txt", pkg)) output.html <- file.path( output, sprintf("%s-unitTests.html", pkg)) printTextProtocol(tests, fileName=output.txt) message( sprintf( "saving txt unit test report to '%s'", output.txt ) ) ## Print HTML version to a file ## printHTMLProtocol has problems on Mac OS X if (Sys.info()["sysname"] != "Darwin"){ message( sprintf( "saving html unit test report to '%s'", output.html ) ) printHTMLProtocol(tests, fileName=output.html) } ## stop() if there are any failures i.e. FALSE to unit test. ## This will cause R CMD check to return error and stop err <- getErrors(tests) if ( (err$nFail + err$nErr) > 0) { stop( sprintf( "unit test problems: %d failures, %d errors", err$nFail, err$nErr) ) } else { success <- err$nTestFunc - err$nFail - err$nErr - err$nDeactivated cat( sprintf( "%d / %d\n", success, err$nTestFunc ) ) } } } else { cat("R package 'RUnit' cannot be loaded -- no unit tests run\n", "for package", pkg,"\n") } RcppEigen/inst/unitTests/runit.sparse.R0000644000176200001440000001754513100733213017672 0ustar liggesusers#!/usr/bin/r -t # # Copyright (C) 2011 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with RcppEigen. If not, see . .setUp <- function(){ suppressMessages(require(inline)) } test.wrapSparse.double.R <- function(){ fx <- cxxfunction( , ' Eigen::SparseMatrix mm(9,3); mm.reserve(9); for (int j = 0; j < 3; ++j) { mm.startVec(j); for (int i = 3 * j; i < 3 * (j + 1); ++i) mm.insertBack(i, j) = 1.; } mm.finalize(); return wrap(mm); ' , plugin = "RcppEigen" ) res <- fx() rr <- Matrix::t(as(gl(3,3), "sparseMatrix")) colnames(rr) <- NULL checkEquals( res, rr, msg = "wrap >") } test.wrapSparse.double.ColMajor.R <- function(){ fx <- cxxfunction( , ' Eigen::SparseMatrix mm(9,3); mm.reserve(9); for (int j = 0; j < 3; ++j) { mm.startVec(j); for (int i = 3 * j; i < 3 * (j + 1); ++i) mm.insertBack(i, j) = 1.; } mm.finalize(); return wrap(mm); ' , plugin = "RcppEigen" ) res <- fx() rr <- Matrix::t(as(gl(3,3), "sparseMatrix")) colnames(rr) <- NULL checkEquals( res, rr, msg = "wrap >") } ## test.wrapSparse.int.ColMajor.R <- function(){ ## classes not yet exported from Matrix ## fx <- cxxfunction( , ' ## Eigen::SparseMatrix mm(9,3); ## mm.reserve(9); ## for (int j = 0; j < 3; ++j) { ## mm.startVec(j); ## for (int i = 3 * j; i < 3 * (j + 1); ++i) ## mm.insertBack(i, j) = 1; ## } ## mm.finalize(); ## return wrap(mm); ## ' , plugin = "RcppEigen" ) ## #res <- fx() ## #rr <- Matrix::t(as(gl(3,3), "sparseMatrix")) ## #colnames(rr) <- NULL ## #checkEquals( res, rr, msg = "wrap >") ## checkException( fx(), msg = "wrap >" ) ## } test.wrapSparse.double.RowMajor.R <- function(){ fx <- cxxfunction( , ' Eigen::SparseMatrix mm(9,3); mm.reserve(9); for (int irow = 0; irow < 9; ++irow) { mm.startVec(irow); mm.insertBack(irow, irow / 3) = static_cast( 9 - irow ); } mm.finalize(); return wrap(mm); ' , plugin = "RcppEigen" ) res <- fx() rr <- new( "dgRMatrix", j=rep(0L:2L, each=3), p=0L:9L, x=as.numeric(9:1), Dim=c(9L,3L) ) colnames(rr) <- NULL checkEquals( res, rr, msg = "wrap >") } ## test.wrapSparse.int.RowMajor.R <- function(){ ## fx <- cxxfunction( , ' ## Eigen::SparseMatrix mm(9,3); ## mm.reserve(9); ## for (int irow = 0; irow < 9; ++irow) { ## mm.startVec(irow); ## mm.insertBack(irow, irow / 3) = 9 - irow; ## } ## mm.finalize(); ## return wrap(mm); ## ' , plugin = "RcppEigen" ) ## #res <- fx() ## #rr <- new( "igRMatrix", j=rep(0L:2L, each=3), p=0L:9L, x=9L:1L, Dim=c(9L,3L) ) ## #colnames(rr) <- NULL ## #checkEquals( res, rr, msg = "wrap >") ## checkException( fx(), msg = "wrap >" ) ## } test.asSparse.double.ColMajor.R <- function(){ fx <- cxxfunction( sig=signature(R_mm="dgCMatrix"), ' Eigen::SparseMatrix mm = Rcpp::as >( R_mm ); return wrap(mm); ' , plugin = "RcppEigen" ) rr <- Matrix::t(as(gl(3,3), "sparseMatrix")) colnames(rr) <- NULL res <- fx( R_mm = rr ) checkEquals( res, rr, msg = "as >") } test.asMappedSparse.double.ColMajor.R <- function(){ fx <- cxxfunction( sig=signature(R_mm="dgCMatrix"), ' typedef Eigen::Map > MapMat; MapMat mm = Rcpp::as( R_mm ); return wrap(mm); ' , plugin = "RcppEigen" ) rr <- Matrix::t(as(gl(3,3), "sparseMatrix")) colnames(rr) <- NULL res <- fx( R_mm = rr ) checkEquals( res, rr, msg = "as > >") } test.asMappedSparse.deprecated.double.ColMajor.R <- function(){ fx <- cxxfunction( sig=signature(R_mm="dgCMatrix"), ' // Deprecated typedef Eigen::MappedSparseMatrix MapMat; MapMat mm = Rcpp::as( R_mm ); return wrap(mm); ' , plugin = "RcppEigen" ) rr <- Matrix::t(as(gl(3,3), "sparseMatrix")) colnames(rr) <- NULL res <- fx( R_mm = rr ) checkEquals( 
res, rr, msg = "as >") } test.asSparse.double.RowMajor.R <- function(){ fx <- cxxfunction( sig=signature(R_mm="dgRMatrix"), ' Eigen::SparseMatrix mm = Rcpp::as >( R_mm ); return wrap(mm); ' , plugin = "RcppEigen" ) rr <- new( "dgRMatrix", j=rep(0L:2L, each=3), p=0L:9L, x=as.numeric(9:1), Dim=c(9L,3L) ) colnames(rr) <- NULL res <- fx( R_mm = rr ) checkEquals( res, rr, msg = "as >") } test.asMappedSparse.double.RowMajor.R <- function(){ fx <- cxxfunction( sig=signature(R_mm="dgRMatrix"), ' typedef Eigen::Map > MapMat; MapMat mm = Rcpp::as( R_mm ); return wrap(mm); ' , plugin = "RcppEigen" ) rr <- new( "dgRMatrix", j=rep(0L:2L, each=3), p=0L:9L, x=as.numeric(9:1), Dim=c(9L,3L) ) colnames(rr) <- NULL res <- fx( R_mm = rr ) checkEquals( res, rr, msg = "as > >") } test.asMappedSparse.deprecated.double.RowMajor.R <- function(){ fx <- cxxfunction( sig=signature(R_mm="dgRMatrix"), ' // Deprecated typedef Eigen::MappedSparseMatrix MapMat; MapMat mm = Rcpp::as( R_mm ); return wrap(mm); ' , plugin = "RcppEigen" ) rr <- new( "dgRMatrix", j=rep(0L:2L, each=3), p=0L:9L, x=as.numeric(9:1), Dim=c(9L,3L) ) colnames(rr) <- NULL res <- fx( R_mm = rr ) checkEquals( res, rr, msg = "as >") } test.sparseCholesky.R <- function() { suppressMessages(require("Matrix", character.only=TRUE)) data("KNex", package = "Matrix") fx <- cxxfunction( signature(input_ = "list"), ' using Eigen::VectorXd; using Eigen::MatrixXd; using Eigen::Lower; using Eigen::Map; using Eigen::SparseMatrix; using Eigen::SimplicialLDLT; using Eigen::Success; List input(input_); const Map > m1 = input[0]; const Map v1 = input[1]; SparseMatrix m2(m1.cols(), m1.cols()); m2.selfadjointView().rankUpdate(m1.adjoint()); SimplicialLDLT > ff(m2); VectorXd res = ff.solve(m1.adjoint() * v1); return List::create(_["res"] = res, _["rows"] = double(ff.rows()), _["cols"] = double(ff.cols())); ', plugin = "RcppEigen") rr <- fx(KNex) checkEquals(rr[[1]], as.vector(solve(crossprod(KNex[[1]]), crossprod(KNex[[1]], KNex[[2]])), mode="numeric"), "Cholmod solution") } RcppEigen/inst/skeleton/0000755000176200001440000000000012472772267014747 5ustar liggesusersRcppEigen/inst/skeleton/Makevars0000644000176200001440000000050612472772267016444 0ustar liggesusers## With Rcpp 0.11.0 and later, we no longer need to set PKG_LIBS as there is ## no user-facing library. The include path to headers is already set by R. #PKG_LIBS = ## With R 3.1.0 or later, you can uncomment the following line to tell R to ## enable compilation with C++11 (or even C++14) where available #CXX_STD = CXX11 RcppEigen/inst/skeleton/rcppeigen_hello_world.Rd0000644000176200001440000000250412472772267021605 0ustar liggesusers\name{RcppEigen-Functions} \alias{rcppeigen_hello_world} \alias{rcppeigen_innerproduct} \alias{rcppeigen_outerproduct} \alias{rcppeigen_bothproducts} \title{Set of functions in example RcppEigen package} \description{ These four functions are created when \code{RcppEigen.package.skeleton()} is invoked to create a skeleton packages. } \usage{ rcppeigen_hello_world() rcppeigen_outerproduct(x) rcppeigen_innerproduct(x) rcppeigen_bothproducts(x) } \arguments{ \item{x}{a numeric vector} } \value{ \code{rcppeigen_hello_world()} does not return a value, but displays a message to the console. \code{rcppeigen_outerproduct()} returns a numeric matrix computed as the outer (vector) product of \code{x}. \code{rcppeigen_innerproduct()} returns a double computer as the inner (vector) product of \code{x}. \code{rcppeigen_bothproducts()} returns a list with both the outer and inner products. 
} \details{ These are example functions which should be largely self-explanatory. Their main benefit is to demonstrate how to write a function using the Eigen C++ classes, and to have to such a function accessible from R. } \references{ See the documentation for Eigen, and RcppEigen, for more details. } \examples{ x <- sqrt(1:4) rcppeigen_innerproduct(x) rcppeigen_outerproduct(x) } \author{Dirk Eddelbuettel} RcppEigen/inst/skeleton/Makevars.win0000644000176200001440000000050612472772267017240 0ustar liggesusers## With Rcpp 0.11.0 and later, we no longer need to set PKG_LIBS as there is ## no user-facing library. The include path to headers is already set by R. #PKG_LIBS = ## With R 3.1.0 or later, you can uncomment the following line to tell R to ## enable compilation with C++11 (or even C++14) where available #CXX_STD = CXX11 RcppEigen/inst/skeleton/rcppeigen_hello_world.cpp0000644000176200001440000000273112304401032021771 0ustar liggesusers// -*- mode: C++; c-indent-level: 4; c-basic-offset: 4; indent-tabs-mode: nil; -*- // we only include RcppEigen.h which pulls Rcpp.h in for us #include // via the depends attribute we tell Rcpp to create hooks for // RcppEigen so that the build process will know what to do // // [[Rcpp::depends(RcppEigen)]] // simple example of creating two matrices and // returning the result of an operatioon on them // // via the exports attribute we tell Rcpp to make this function // available from R // // [[Rcpp::export]] Eigen::MatrixXd rcppeigen_hello_world() { Eigen::MatrixXd m1 = Eigen::MatrixXd::Identity(3, 3); Eigen::MatrixXd m2 = Eigen::MatrixXd::Random(3, 3); return m1 + 3 * (m1 + m2); } // another simple example: outer product of a vector, // returning a matrix // // [[Rcpp::export]] Eigen::MatrixXd rcppeigen_outerproduct(const Eigen::VectorXd & x) { Eigen::MatrixXd m = x * x.transpose(); return m; } // and the inner product returns a scalar // // [[Rcpp::export]] double rcppeigen_innerproduct(const Eigen::VectorXd & x) { double v = x.transpose() * x; return v; } // and we can use Rcpp::List to return both at the same time // // [[Rcpp::export]] Rcpp::List rcppeigen_bothproducts(const Eigen::VectorXd & x) { Eigen::MatrixXd op = x * x.transpose(); double ip = x.transpose() * x; return Rcpp::List::create(Rcpp::Named("outer")=op, Rcpp::Named("inner")=ip); } RcppEigen/inst/include/0000755000176200001440000000000013100733213014517 5ustar liggesusersRcppEigen/inst/include/RcppEigenCholmod.h0000644000176200001440000014224713061745560020102 0ustar liggesusers// -*- mode: C++; c-indent-level: 4; c-basic-offset: 4; tab-width: 8 -*- // // cholmod.h: selected headers from Tim Davis's CHOLMOD package // // Copyright (C) 2011 Douglas Bates, Martin Maechler, Dirk Eddelbuettel and Romain Francois // // This file is part of RcppEigen. // // RcppEigen is free software: you can redistribute it and/or modify it // under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // RcppEigen is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with RcppEigen. If not, see . #ifndef RcppEigen_CHOLMOD_H #define RcppEigen_CHOLMOD_H /* Original of this is Matrix/inst/include/cholmod.h --- sync with it (e.g for lme4) ! 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include #include // Rather use C99 -- which we require in R anyway #include #ifdef __cplusplus extern "C" { #endif // from Matrix/src/SuiteSparse_config/SuiteSparse_config.h - line 51 : #ifndef SuiteSparse_long /* #ifdef _WIN64 */ /* #define SuiteSparse_long __int64 */ /* #define SuiteSparse_long_max _I64_MAX */ /* #define SuiteSparse_long_idd "I64d" */ /* #else */ /* #define SuiteSparse_long long */ /* #define SuiteSparse_long_max LONG_MAX */ /* #define SuiteSparse_long_idd "ld" */ /* #endif */ #define SuiteSparse_long int64_t // typically long (but on WIN64) #define SuiteSparse_ulong uint64_t // only needed for ../COLAMD/Source/colamd.c (original has 'unsigned Int' which fails!!) #define SuiteSparse_long_max 9223372036854775801 // typically LONG_MAX (but ..) #define SuiteSparse_long_idd PRId64 // typically "ld" #define SuiteSparse_long_id "%" SuiteSparse_long_idd #endif /* For backward compatibility with prior versions of SuiteSparse. The UF_* * macros are deprecated and will be removed in a future version. */ #ifndef UF_long #define UF_long SuiteSparse_long #define UF_long_max SuiteSparse_long_max #define UF_long_idd SuiteSparse_long_idd #define UF_long_id SuiteSparse_long_id #endif #define CHOLMOD_HAS_VERSION_FUNCTION #define CHOLMOD_DATE "April 25, 2013" #define CHOLMOD_VER_CODE(main,sub) ((main) * 1000 + (sub)) #define CHOLMOD_MAIN_VERSION 2 #define CHOLMOD_SUB_VERSION 1 #define CHOLMOD_SUBSUB_VERSION 2 #define CHOLMOD_VERSION \ CHOLMOD_VER_CODE(CHOLMOD_MAIN_VERSION,CHOLMOD_SUB_VERSION) // from ../../src/CHOLMOD/Include/cholmod_core.h - line 275 : /* Each CHOLMOD object has its own type code. */ #define CHOLMOD_COMMON 0 #define CHOLMOD_SPARSE 1 #define CHOLMOD_FACTOR 2 #define CHOLMOD_DENSE 3 #define CHOLMOD_TRIPLET 4 /* ========================================================================== */ /* === CHOLMOD Common ======================================================= */ /* ========================================================================== */ /* itype defines the types of integer used: */ #define CHOLMOD_INT 0 /* all integer arrays are int */ #define CHOLMOD_INTLONG 1 /* most are int, some are SuiteSparse_long */ #define CHOLMOD_LONG 2 /* all integer arrays are SuiteSparse_long */ /* The itype of all parameters for all CHOLMOD routines must match. * FUTURE WORK: CHOLMOD_INTLONG is not yet supported. */ /* dtype defines what the numerical type is (double or float): */ #define CHOLMOD_DOUBLE 0 /* all numerical values are double */ #define CHOLMOD_SINGLE 1 /* all numerical values are float */ /* The dtype of all parameters for all CHOLMOD routines must match. * * Scalar floating-point values are always passed as double arrays of size 2 * (for the real and imaginary parts). They are typecast to float as needed. * FUTURE WORK: the float case is not supported yet. */ /* xtype defines the kind of numerical values used: */ #define CHOLMOD_PATTERN 0 /* pattern only, no numerical values */ #define CHOLMOD_REAL 1 /* a real matrix */ #define CHOLMOD_COMPLEX 2 /* a complex matrix (ANSI C99 compatible) */ #define CHOLMOD_ZOMPLEX 3 /* a complex matrix (MATLAB compatible) */ /* Definitions for cholmod_common: */ #define CHOLMOD_MAXMETHODS 9 /* maximum number of different methods that */ /* cholmod_analyze can try. Must be >= 9. */ /* Common->status values. zero means success, negative means a fatal error, * positive is a warning. 
*/ #define CHOLMOD_OK 0 /* success */ #define CHOLMOD_NOT_INSTALLED (-1) /* failure: method not installed */ #define CHOLMOD_OUT_OF_MEMORY (-2) /* failure: out of memory */ #define CHOLMOD_TOO_LARGE (-3) /* failure: integer overflow occured */ #define CHOLMOD_INVALID (-4) /* failure: invalid input */ #define CHOLMOD_GPU_PROBLEM (-5) /* failure: GPU fatal error */ #define CHOLMOD_NOT_POSDEF (1) /* warning: matrix not pos. def. */ #define CHOLMOD_DSMALL (2) /* warning: D for LDL' or diag(L) or */ /* LL' has tiny absolute value */ /* ordering method (also used for L->ordering) */ #define CHOLMOD_NATURAL 0 /* use natural ordering */ #define CHOLMOD_GIVEN 1 /* use given permutation */ #define CHOLMOD_AMD 2 /* use minimum degree (AMD) */ #define CHOLMOD_METIS 3 /* use METIS' nested dissection */ #define CHOLMOD_NESDIS 4 /* use CHOLMOD's version of nested dissection:*/ /* node bisector applied recursively, followed * by constrained minimum degree (CSYMAMD or * CCOLAMD) */ #define CHOLMOD_COLAMD 5 /* use AMD for A, COLAMD for A*A' */ /* POSTORDERED is not a method, but a result of natural ordering followed by a * weighted postorder. It is used for L->ordering, not method [ ].ordering. */ #define CHOLMOD_POSTORDERED 6 /* natural ordering, postordered. */ /* supernodal strategy (for Common->supernodal) */ #define CHOLMOD_SIMPLICIAL 0 /* always do simplicial */ #define CHOLMOD_AUTO 1 /* select simpl/super depending on matrix */ #define CHOLMOD_SUPERNODAL 2 /* always do supernodal */ typedef struct cholmod_common_struct { /* ---------------------------------------------------------------------- */ /* parameters for symbolic/numeric factorization and update/downdate */ /* ---------------------------------------------------------------------- */ double dbound ; /* Smallest absolute value of diagonal entries of D * for LDL' factorization and update/downdate/rowadd/ * rowdel, or the diagonal of L for an LL' factorization. * Entries in the range 0 to dbound are replaced with dbound. * Entries in the range -dbound to 0 are replaced with -dbound. No * changes are made to the diagonal if dbound <= 0. Default: zero */ double grow0 ; /* For a simplicial factorization, L->i and L->x can * grow if necessary. grow0 is the factor by which * it grows. For the initial space, L is of size MAX (1,grow0) times * the required space. If L runs out of space, the new size of L is * MAX(1.2,grow0) times the new required space. If you do not plan on * modifying the LDL' factorization in the Modify module, set grow0 to * zero (or set grow2 to 0, see below). Default: 1.2 */ double grow1 ; size_t grow2 ; /* For a simplicial factorization, each column j of L * is initialized with space equal to * grow1*L->ColCount[j] + grow2. If grow0 < 1, grow1 < 1, or grow2 == 0, * then the space allocated is exactly equal to L->ColCount[j]. If the * column j runs out of space, it increases to grow1*need + grow2 in * size, where need is the total # of nonzeros in that column. If you do * not plan on modifying the factorization in the Modify module, set * grow2 to zero. Default: grow1 = 1.2, grow2 = 5. */ size_t maxrank ; /* rank of maximum update/downdate. Valid values: * 2, 4, or 8. A value < 2 is set to 2, and a * value > 8 is set to 8. It is then rounded up to the next highest * power of 2, if not already a power of 2. Workspace (Xwork, below) of * size nrow-by-maxrank double's is allocated for the update/downdate. * If an update/downdate of rank-k is requested, with k > maxrank, * it is done in steps of maxrank. 
Default: 8, which is fastest. * Memory usage can be reduced by setting maxrank to 2 or 4. */ double supernodal_switch ; /* supernodal vs simplicial factorization */ int supernodal ; /* If Common->supernodal <= CHOLMOD_SIMPLICIAL * (0) then cholmod_analyze performs a * simplicial analysis. If >= CHOLMOD_SUPERNODAL (2), then a supernodal * analysis is performed. If == CHOLMOD_AUTO (1) and * flop/nnz(L) < Common->supernodal_switch, then a simplicial analysis * is done. A supernodal analysis done otherwise. * Default: CHOLMOD_AUTO. Default supernodal_switch = 40 */ int final_asis ; /* If TRUE, then ignore the other final_* parameters * (except for final_pack). * The factor is left as-is when done. Default: TRUE.*/ int final_super ; /* If TRUE, leave a factor in supernodal form when * supernodal factorization is finished. If FALSE, * then convert to a simplicial factor when done. * Default: TRUE */ int final_ll ; /* If TRUE, leave factor in LL' form when done. * Otherwise, leave in LDL' form. Default: FALSE */ int final_pack ; /* If TRUE, pack the columns when done. If TRUE, and * cholmod_factorize is called with a symbolic L, L is * allocated with exactly the space required, using L->ColCount. If you * plan on modifying the factorization, set Common->final_pack to FALSE, * and each column will be given a little extra slack space for future * growth in fill-in due to updates. Default: TRUE */ int final_monotonic ; /* If TRUE, ensure columns are monotonic when done. * Default: TRUE */ int final_resymbol ;/* if cholmod_factorize performed a supernodal * factorization, final_resymbol is true, and * final_super is FALSE (convert a simplicial numeric factorization), * then numerically zero entries that resulted from relaxed supernodal * amalgamation are removed. This does not remove entries that are zero * due to exact numeric cancellation, since doing so would break the * update/downdate rowadd/rowdel routines. Default: FALSE. */ /* supernodal relaxed amalgamation parameters: */ double zrelax [3] ; size_t nrelax [3] ; /* Let ns be the total number of columns in two adjacent supernodes. * Let z be the fraction of zero entries in the two supernodes if they * are merged (z includes zero entries from prior amalgamations). The * two supernodes are merged if: * (ns <= nrelax [0]) || (no new zero entries added) || * (ns <= nrelax [1] && z < zrelax [0]) || * (ns <= nrelax [2] && z < zrelax [1]) || (z < zrelax [2]) * * Default parameters result in the following rule: * (ns <= 4) || (no new zero entries added) || * (ns <= 16 && z < 0.8) || (ns <= 48 && z < 0.1) || (z < 0.05) */ int prefer_zomplex ; /* X = cholmod_solve (sys, L, B, Common) computes * x=A\b or solves a related system. If L and B are * both real, then X is real. Otherwise, X is returned as * CHOLMOD_COMPLEX if Common->prefer_zomplex is FALSE, or * CHOLMOD_ZOMPLEX if Common->prefer_zomplex is TRUE. This parameter * is needed because there is no supernodal zomplex L. Suppose the * caller wants all complex matrices to be stored in zomplex form * (MATLAB, for example). A supernodal L is returned in complex form * if A is zomplex. B can be real, and thus X = cholmod_solve (L,B) * should return X as zomplex. This cannot be inferred from the input * arguments L and B. Default: FALSE, since all data types are * supported in CHOLMOD_COMPLEX form and since this is the native type * of LAPACK and the BLAS. Note that the MATLAB/cholmod.c mexFunction * sets this parameter to TRUE, since MATLAB matrices are in * CHOLMOD_ZOMPLEX form. 
*/ int prefer_upper ; /* cholmod_analyze and cholmod_factorize work * fastest when a symmetric matrix is stored in * upper triangular form when a fill-reducing ordering is used. In * MATLAB, this corresponds to how x=A\b works. When the matrix is * ordered as-is, they work fastest when a symmetric matrix is in lower * triangular form. In MATLAB, R=chol(A) does the opposite. This * parameter affects only how cholmod_read returns a symmetric matrix. * If TRUE (the default case), a symmetric matrix is always returned in * upper-triangular form (A->stype = 1). */ int quick_return_if_not_posdef ; /* if TRUE, the supernodal numeric * factorization will return quickly if * the matrix is not positive definite. Default: FALSE. */ /* ---------------------------------------------------------------------- */ /* printing and error handling options */ /* ---------------------------------------------------------------------- */ int print ; /* print level. Default: 3 */ int precise ; /* if TRUE, print 16 digits. Otherwise print 5 */ int (*print_function) (const char *, ...) ; /* pointer to printf */ int try_catch ; /* if TRUE, then ignore errors; CHOLMOD is in the middle * of a try/catch block. No error message is printed * and the Common->error_handler function is not called. */ void (*error_handler) (int status, const char *file, int line, const char *message) ; /* Common->error_handler is the user's error handling routine. If not * NULL, this routine is called if an error occurs in CHOLMOD. status * can be CHOLMOD_OK (0), negative for a fatal error, and positive for * a warning. file is a string containing the name of the source code * file where the error occured, and line is the line number in that * file. message is a string describing the error in more detail. */ /* ---------------------------------------------------------------------- */ /* ordering options */ /* ---------------------------------------------------------------------- */ /* The cholmod_analyze routine can try many different orderings and select * the best one. It can also try one ordering method multiple times, with * different parameter settings. The default is to use three orderings, * the user's permutation (if provided), AMD which is the fastest ordering * and generally gives good fill-in, and METIS. CHOLMOD's nested dissection * (METIS with a constrained AMD) usually gives a better ordering than METIS * alone (by about 5% to 10%) but it takes more time. * * If you know the method that is best for your matrix, set Common->nmethods * to 1 and set Common->method [0] to the set of parameters for that method. * If you set it to 1 and do not provide a permutation, then only AMD will * be called. * * If METIS is not available, the default # of methods tried is 2 (the user * permutation, if any, and AMD). * * To try other methods, set Common->nmethods to the number of methods you * want to try. The suite of default methods and their parameters is * described in the cholmod_defaults routine, and summarized here: * * Common->method [i]: * i = 0: user-provided ordering (cholmod_analyze_p only) * i = 1: AMD (for both A and A*A') * i = 2: METIS * i = 3: CHOLMOD's nested dissection (NESDIS), default parameters * i = 4: natural * i = 5: NESDIS with nd_small = 20000 * i = 6: NESDIS with nd_small = 4, no constrained minimum degree * i = 7: NESDIS with no dense node removal * i = 8: AMD for A, COLAMD for A*A' * * You can modify the suite of methods you wish to try by modifying * Common.method [...] 
after calling cholmod_start or cholmod_defaults. * * For example, to use AMD, followed by a weighted postordering: * * Common->nmethods = 1 ; * Common->method [0].ordering = CHOLMOD_AMD ; * Common->postorder = TRUE ; * * To use the natural ordering (with no postordering): * * Common->nmethods = 1 ; * Common->method [0].ordering = CHOLMOD_NATURAL ; * Common->postorder = FALSE ; * * If you are going to factorize hundreds or more matrices with the same * nonzero pattern, you may wish to spend a great deal of time finding a * good permutation. In this case, try setting Common->nmethods to 9. * The time spent in cholmod_analysis will be very high, but you need to * call it only once. * * cholmod_analyze sets Common->current to a value between 0 and nmethods-1. * Each ordering method uses the set of options defined by this parameter. */ int nmethods ; /* The number of ordering methods to try. Default: 0. * nmethods = 0 is a special case. cholmod_analyze * will try the user-provided ordering (if given) and AMD. Let fl and * lnz be the flop count and nonzeros in L from AMD's ordering. Let * anz be the number of nonzeros in the upper or lower triangular part * of the symmetric matrix A. If fl/lnz < 500 or lnz/anz < 5, then this * is a good ordering, and METIS is not attempted. Otherwise, METIS is * tried. The best ordering found is used. If nmethods > 0, the * methods used are given in the method[ ] array, below. The first * three methods in the default suite of orderings is (1) use the given * permutation (if provided), (2) use AMD, and (3) use METIS. Maximum * allowed value is CHOLMOD_MAXMETHODS. */ int current ; /* The current method being tried. Default: 0. Valid * range is 0 to nmethods-1. */ int selected ; /* The best method found. */ /* The suite of ordering methods and parameters: */ struct cholmod_method_struct { /* statistics for this method */ double lnz ; /* nnz(L) excl. zeros from supernodal amalgamation, * for a "pure" L */ double fl ; /* flop count for a "pure", real simplicial LL' * factorization, with no extra work due to * amalgamation. Subtract n to get the LDL' flop count. Multiply * by about 4 if the matrix is complex or zomplex. */ /* ordering method parameters */ double prune_dense ;/* dense row/col control for AMD, SYMAMD, CSYMAMD, * and NESDIS (cholmod_nested_dissection). For a * symmetric n-by-n matrix, rows/columns with more than * MAX (16, prune_dense * sqrt (n)) entries are removed prior to * ordering. They appear at the end of the re-ordered matrix. * * If prune_dense < 0, only completely dense rows/cols are removed. * * This paramater is also the dense column control for COLAMD and * CCOLAMD. For an m-by-n matrix, columns with more than * MAX (16, prune_dense * sqrt (MIN (m,n))) entries are removed prior * to ordering. They appear at the end of the re-ordered matrix. * CHOLMOD factorizes A*A', so it calls COLAMD and CCOLAMD with A', * not A. Thus, this parameter affects the dense *row* control for * CHOLMOD's matrix, and the dense *column* control for COLAMD and * CCOLAMD. * * Removing dense rows and columns improves the run-time of the * ordering methods. It has some impact on ordering quality * (usually minimal, sometimes good, sometimes bad). * * Default: 10. */ double prune_dense2 ;/* dense row control for COLAMD and CCOLAMD. * Rows with more than MAX (16, dense2 * sqrt (n)) * for an m-by-n matrix are removed prior to ordering. 
CHOLMOD's * matrix is transposed before ordering it with COLAMD or CCOLAMD, * so this controls the dense *columns* of CHOLMOD's matrix, and * the dense *rows* of COLAMD's or CCOLAMD's matrix. * * If prune_dense2 < 0, only completely dense rows/cols are removed. * * Default: -1. Note that this is not the default for COLAMD and * CCOLAMD. -1 is best for Cholesky. 10 is best for LU. */ double nd_oksep ; /* in NESDIS, when a node separator is computed, it * discarded if nsep >= nd_oksep*n, where nsep is * the number of nodes in the separator, and n is the size of the * graph being cut. Valid range is 0 to 1. If 1 or greater, the * separator is discarded if it consists of the entire graph. * Default: 1 */ double other1 [4] ; /* future expansion */ size_t nd_small ; /* do not partition graphs with fewer nodes than * nd_small, in NESDIS. Default: 200 (same as * METIS) */ size_t other2 [4] ; /* future expansion */ int aggressive ; /* Aggresive absorption in AMD, COLAMD, SYMAMD, * CCOLAMD, and CSYMAMD. Default: TRUE */ int order_for_lu ; /* CCOLAMD can be optimized to produce an ordering * for LU or Cholesky factorization. CHOLMOD only * performs a Cholesky factorization. However, you may wish to use * CHOLMOD as an interface for CCOLAMD but use it for your own LU * factorization. In this case, order_for_lu should be set to FALSE. * When factorizing in CHOLMOD itself, you should *** NEVER *** set * this parameter FALSE. Default: TRUE. */ int nd_compress ; /* If TRUE, compress the graph and subgraphs before * partitioning them in NESDIS. Default: TRUE */ int nd_camd ; /* If 1, follow the nested dissection ordering * with a constrained minimum degree ordering that * respects the partitioning just found (using CAMD). If 2, use * CSYMAMD instead. If you set nd_small very small, you may not need * this ordering, and can save time by setting it to zero (no * constrained minimum degree ordering). Default: 1. */ int nd_components ; /* The nested dissection ordering finds a node * separator that splits the graph into two parts, * which may be unconnected. If nd_components is TRUE, each of * these connected components is split independently. If FALSE, * each part is split as a whole, even if it consists of more than * one connected component. Default: FALSE */ /* fill-reducing ordering to use */ int ordering ; size_t other3 [4] ; /* future expansion */ } method [CHOLMOD_MAXMETHODS + 1] ; int postorder ; /* If TRUE, cholmod_analyze follows the ordering with a * weighted postorder of the elimination tree. Improves * supernode amalgamation. Does not affect fundamental nnz(L) and * flop count. Default: TRUE. */ /* ---------------------------------------------------------------------- */ /* memory management routines */ /* ---------------------------------------------------------------------- */ void *(*malloc_memory) (size_t) ; /* pointer to malloc */ void *(*realloc_memory) (void *, size_t) ; /* pointer to realloc */ void (*free_memory) (void *) ; /* pointer to free */ void *(*calloc_memory) (size_t, size_t) ; /* pointer to calloc */ /* ---------------------------------------------------------------------- */ /* routines for complex arithmetic */ /* ---------------------------------------------------------------------- */ int (*complex_divide) (double ax, double az, double bx, double bz, double *cx, double *cz) ; /* flag = complex_divide (ax, az, bx, bz, &cx, &cz) computes the complex * division c = a/b, where ax and az hold the real and imaginary part * of a, and b and c are stored similarly. 
flag is returned as 1 if * a divide-by-zero occurs, or 0 otherwise. By default, the function * pointer Common->complex_divide is set equal to cholmod_divcomplex. */ double (*hypotenuse) (double x, double y) ; /* s = hypotenuse (x,y) computes s = sqrt (x*x + y*y), but does so more * accurately. By default, the function pointer Common->hypotenuse is * set equal to cholmod_hypot. See also the hypot function in the C99 * standard, which has an identical syntax and function. If you have * a C99-compliant compiler, you can set Common->hypotenuse = hypot. */ /* ---------------------------------------------------------------------- */ /* METIS workarounds */ /* ---------------------------------------------------------------------- */ double metis_memory ; /* This is a parameter for CHOLMOD's interface to * METIS, not a parameter to METIS itself. METIS * uses an amount of memory that is difficult to estimate precisely * beforehand. If it runs out of memory, it terminates your program. * All routines in CHOLMOD except for CHOLMOD's interface to METIS * return an error status and safely return to your program if they run * out of memory. To mitigate this problem, the CHOLMOD interface * can allocate a single block of memory equal in size to an empirical * upper bound of METIS's memory usage times the Common->metis_memory * parameter, and then immediately free it. It then calls METIS. If * this pre-allocation fails, it is possible that METIS will fail as * well, and so CHOLMOD returns with an out-of-memory condition without * calling METIS. * * METIS_NodeND (used in the CHOLMOD_METIS ordering option) with its * default parameter settings typically uses about (4*nz+40n+4096) * times sizeof(int) memory, where nz is equal to the number of entries * in A for the symmetric case or AA' if an unsymmetric matrix is * being ordered (where nz includes both the upper and lower parts * of A or AA'). The observed "upper bound" (with 2 exceptions), * measured in an instrumented copy of METIS 4.0.1 on thousands of * matrices, is (10*nz+50*n+4096) * sizeof(int). Two large matrices * exceeded this bound, one by almost a factor of 2 (Gupta/gupta2). * * If your program is terminated by METIS, try setting metis_memory to * 2.0, or even higher if needed. By default, CHOLMOD assumes that METIS * does not have this problem (so that CHOLMOD will work correctly when * this issue is fixed in METIS). Thus, the default value is zero. * This work-around is not guaranteed anyway. * * If a matrix exceeds this predicted memory usage, AMD is attempted * instead. It, too, may run out of memory, but if it does so it will * not terminate your program. */ double metis_dswitch ; /* METIS_NodeND in METIS 4.0.1 gives a seg */ size_t metis_nswitch ; /* fault with one matrix of order n = 3005 and * nz = 6,036,025. This is a very dense graph. * The workaround is to use AMD instead of METIS for matrices of dimension * greater than Common->metis_nswitch (default 3000) or more and with * density of Common->metis_dswitch (default 0.66) or more. * cholmod_nested_dissection has no problems with the same matrix, even * though it uses METIS_NodeComputeSeparator on this matrix. If this * seg fault does not affect you, set metis_nswitch to zero or less, * and CHOLMOD will not switch to AMD based just on the density of the * matrix (it will still switch to AMD if the metis_memory parameter * causes the switch). 
*/ /* ---------------------------------------------------------------------- */ /* workspace */ /* ---------------------------------------------------------------------- */ /* CHOLMOD has several routines that take less time than the size of * workspace they require. Allocating and initializing the workspace would * dominate the run time, unless workspace is allocated and initialized * just once. CHOLMOD allocates this space when needed, and holds it here * between calls to CHOLMOD. cholmod_start sets these pointers to NULL * (which is why it must be the first routine called in CHOLMOD). * cholmod_finish frees the workspace (which is why it must be the last * call to CHOLMOD). */ size_t nrow ; /* size of Flag and Head */ SuiteSparse_long mark ; /* mark value for Flag array */ size_t iworksize ; /* size of Iwork. Upper bound: 6*nrow+ncol */ size_t xworksize ; /* size of Xwork, in bytes. * maxrank*nrow*sizeof(double) for update/downdate. * 2*nrow*sizeof(double) otherwise */ /* initialized workspace: contents needed between calls to CHOLMOD */ void *Flag ; /* size nrow, an integer array. Kept cleared between * calls to cholmod rouines (Flag [i] < mark) */ void *Head ; /* size nrow+1, an integer array. Kept cleared between * calls to cholmod routines (Head [i] = EMPTY) */ void *Xwork ; /* a double array. Its size varies. It is nrow for * most routines (cholmod_rowfac, cholmod_add, * cholmod_aat, cholmod_norm, cholmod_ssmult) for the real case, twice * that when the input matrices are complex or zomplex. It is of size * 2*nrow for cholmod_rowadd and cholmod_rowdel. For cholmod_updown, * its size is maxrank*nrow where maxrank is 2, 4, or 8. Kept cleared * between calls to cholmod (set to zero). */ /* uninitialized workspace, contents not needed between calls to CHOLMOD */ void *Iwork ; /* size iworksize, 2*nrow+ncol for most routines, * up to 6*nrow+ncol for cholmod_analyze. */ int itype ; /* If CHOLMOD_LONG, Flag, Head, and Iwork are SuiteSparse_long. * Otherwise all three arrays are int. */ int dtype ; /* double or float */ /* Common->itype and Common->dtype are used to define the types of all * sparse matrices, triplet matrices, dense matrices, and factors * created using this Common struct. The itypes and dtypes of all * parameters to all CHOLMOD routines must match. */ int no_workspace_reallocate ; /* this is an internal flag, used as a * precaution by cholmod_analyze. It is normally false. If true, * cholmod_allocate_work is not allowed to reallocate any workspace; * they must use the existing workspace in Common (Iwork, Flag, Head, * and Xwork). Added for CHOLMOD v1.1 */ /* ---------------------------------------------------------------------- */ /* statistics */ /* ---------------------------------------------------------------------- */ /* fl and lnz are set only in cholmod_analyze and cholmod_rowcolcounts, * in the Cholesky modudle. modfl is set only in the Modify module. */ int status ; /* error code */ double fl ; /* LL' flop count from most recent analysis */ double lnz ; /* fundamental nz in L */ double anz ; /* nonzeros in tril(A) if A is symmetric/lower, * triu(A) if symmetric/upper, or tril(A*A') if * unsymmetric, in last call to cholmod_analyze. 
*/ double modfl ; /* flop count from most recent update/downdate/ * rowadd/rowdel (excluding flops to modify the * solution to Lx=b, if computed) */ size_t malloc_count ; /* # of objects malloc'ed minus the # free'd*/ size_t memory_usage ; /* peak memory usage in bytes */ size_t memory_inuse ; /* current memory usage in bytes */ double nrealloc_col ; /* # of column reallocations */ double nrealloc_factor ;/* # of factor reallocations due to col. reallocs */ double ndbounds_hit ; /* # of times diagonal modified by dbound */ double rowfacfl ; /* # of flops in last call to cholmod_rowfac */ double aatfl ; /* # of flops to compute A(:,f)*A(:,f)' */ /* ---------------------------------------------------------------------- */ /* future expansion */ /* ---------------------------------------------------------------------- */ /* To allow CHOLMOD to be updated without recompiling the user application, * additional space is set aside here for future statistics, parameters, * and workspace. Note: additional entries were added in v1.1 to the * method array, above, and thus v1.0 and v1.1 are not binary compatible. * * v1.1 to the current version are binary compatible. */ /* ---------------------------------------------------------------------- */ double other1 [10] ; double SPQR_xstat [4] ; /* for SuiteSparseQR statistics */ /* SuiteSparseQR control parameters: */ double SPQR_grain ; /* task size is >= max (total flops / grain) */ double SPQR_small ; /* task size is >= small */ /* ---------------------------------------------------------------------- */ SuiteSparse_long SPQR_istat [10] ; /* for SuiteSparseQR statistics */ SuiteSparse_long other2 [6] ; /* reduced from size 16 in v1.6 */ /* ---------------------------------------------------------------------- */ int other3 [10] ; /* reduced from size 16 in v1.1. */ int prefer_binary ; /* cholmod_read_triplet converts a symmetric * pattern-only matrix into a real matrix. If * prefer_binary is FALSE, the diagonal entries are set to 1 + the degree * of the row/column, and off-diagonal entries are set to -1 (resulting * in a positive definite matrix if the diagonal is zero-free). Most * symmetric patterns are the pattern a positive definite matrix. If * this parameter is TRUE, then the matrix is returned with a 1 in each * entry, instead. Default: FALSE. Added in v1.3. */ /* control parameter (added for v1.2): */ int default_nesdis ; /* Default: FALSE. If FALSE, then the default * ordering strategy (when Common->nmethods == 0) * is to try the given ordering (if present), AMD, and then METIS if AMD * reports high fill-in. If Common->default_nesdis is TRUE then NESDIS * is used instead in the default strategy. */ /* statistic (added for v1.2): */ int called_nd ; /* TRUE if the last call to * cholmod_analyze called NESDIS or METIS. */ int blas_ok ; /* FALSE if BLAS int overflow; TRUE otherwise */ /* SuiteSparseQR control parameters: */ int SPQR_shrink ; /* controls stack realloc method */ int SPQR_nthreads ; /* number of TBB threads, 0 = auto */ /* ---------------------------------------------------------------------- */ size_t other4 [16] ; /* ---------------------------------------------------------------------- */ void *other5 [16] ; } cholmod_common ; // in ../../src/CHOLMOD/Include/cholmod_core.h skip forward to - line 1170 : /* A sparse matrix stored in compressed-column form. 
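 * As an illustrative example (values chosen here only for demonstration):
 * the 3-by-3 matrix
 *
 *     [ 1 0 2 ]
 *     [ 0 3 0 ]
 *     [ 4 0 5 ]
 *
 * stored column by column has p = {0, 2, 3, 5}, i = {0, 2, 1, 0, 2} and
 * x = {1, 4, 3, 2, 5}; in packed form the entries of column j occupy
 * positions p [j] ... p [j+1]-1 of i and x.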
*/ typedef struct cholmod_sparse_struct { size_t nrow ; /* the matrix is nrow-by-ncol */ size_t ncol ; size_t nzmax ; /* maximum number of entries in the matrix */ /* pointers to int or SuiteSparse_long: */ void *p ; /* p [0..ncol], the column pointers */ void *i ; /* i [0..nzmax-1], the row indices */ /* for unpacked matrices only: */ void *nz ; /* nz [0..ncol-1], the # of nonzeros in each col. In * packed form, the nonzero pattern of column j is in * A->i [A->p [j] ... A->p [j+1]-1]. In unpacked form, column j is in * A->i [A->p [j] ... A->p [j]+A->nz[j]-1] instead. In both cases, the * numerical values (if present) are in the corresponding locations in * the array x (or z if A->xtype is CHOLMOD_ZOMPLEX). */ /* pointers to double or float: */ void *x ; /* size nzmax or 2*nzmax, if present */ void *z ; /* size nzmax, if present */ int stype ; /* Describes what parts of the matrix are considered: * * 0: matrix is "unsymmetric": use both upper and lower triangular parts * (the matrix may actually be symmetric in pattern and value, but * both parts are explicitly stored and used). May be square or * rectangular. * >0: matrix is square and symmetric, use upper triangular part. * Entries in the lower triangular part are ignored. * <0: matrix is square and symmetric, use lower triangular part. * Entries in the upper triangular part are ignored. * * Note that stype>0 and stype<0 are different for cholmod_sparse and * cholmod_triplet. See the cholmod_triplet data structure for more * details. */ int itype ; /* CHOLMOD_INT: p, i, and nz are int. * CHOLMOD_INTLONG: p is SuiteSparse_long, * i and nz are int. * CHOLMOD_LONG: p, i, and nz are SuiteSparse_long */ int xtype ; /* pattern, real, complex, or zomplex */ int dtype ; /* x and z are double or float */ int sorted ; /* TRUE if columns are sorted, FALSE otherwise */ int packed ; /* TRUE if packed (nz ignored), FALSE if unpacked * (nz is required) */ } cholmod_sparse ; // in ../../src/CHOLMOD/Include/cholmod_core.h skip forward to - line 1552 : /* A symbolic and numeric factorization, either simplicial or supernodal. * In all cases, the row indices in the columns of L are kept sorted. */ typedef struct cholmod_factor_struct { /* ---------------------------------------------------------------------- */ /* for both simplicial and supernodal factorizations */ /* ---------------------------------------------------------------------- */ size_t n ; /* L is n-by-n */ size_t minor ; /* If the factorization failed, L->minor is the column * at which it failed (in the range 0 to n-1). A value * of n means the factorization was successful or * the matrix has not yet been factorized. */ /* ---------------------------------------------------------------------- */ /* symbolic ordering and analysis */ /* ---------------------------------------------------------------------- */ void *Perm ; /* size n, permutation used */ void *ColCount ; /* size n, column counts for simplicial L */ void *IPerm ; /* size n, inverse permutation. Only created by * cholmod_solve2 if Bset is used. */ /* ---------------------------------------------------------------------- */ /* simplicial factorization */ /* ---------------------------------------------------------------------- */ size_t nzmax ; /* size of i and x */ void *p ; /* p [0..ncol], the column pointers */ void *i ; /* i [0..nzmax-1], the row indices */ void *x ; /* x [0..nzmax-1], the numerical values */ void *z ; void *nz ; /* nz [0..ncol-1], the # of nonzeros in each column. * i [p [j] ... 
p [j]+nz[j]-1] contains the row indices, * and the numerical values are in the same locatins * in x. The value of i [p [k]] is always k. */ void *next ; /* size ncol+2. next [j] is the next column in i/x */ void *prev ; /* size ncol+2. prev [j] is the prior column in i/x. * head of the list is ncol+1, and the tail is ncol. */ /* ---------------------------------------------------------------------- */ /* supernodal factorization */ /* ---------------------------------------------------------------------- */ /* Note that L->x is shared with the simplicial data structure. L->x has * size L->nzmax for a simplicial factor, and size L->xsize for a supernodal * factor. */ size_t nsuper ; /* number of supernodes */ size_t ssize ; /* size of s, integer part of supernodes */ size_t xsize ; /* size of x, real part of supernodes */ size_t maxcsize ; /* size of largest update matrix */ size_t maxesize ; /* max # of rows in supernodes, excl. triangular part */ void *super ; /* size nsuper+1, first col in each supernode */ void *pi ; /* size nsuper+1, pointers to integer patterns */ void *px ; /* size nsuper+1, pointers to real parts */ void *s ; /* size ssize, integer part of supernodes */ /* ---------------------------------------------------------------------- */ /* factorization type */ /* ---------------------------------------------------------------------- */ int ordering ; /* ordering method used */ int is_ll ; /* TRUE if LL', FALSE if LDL' */ int is_super ; /* TRUE if supernodal, FALSE if simplicial */ int is_monotonic ; /* TRUE if columns of L appear in order 0..n-1. * Only applicable to simplicial numeric types. */ /* There are 8 types of factor objects that cholmod_factor can represent * (only 6 are used): * * Numeric types (xtype is not CHOLMOD_PATTERN) * -------------------------------------------- * * simplicial LDL': (is_ll FALSE, is_super FALSE). Stored in compressed * column form, using the simplicial components above (nzmax, p, i, * x, z, nz, next, and prev). The unit diagonal of L is not stored, * and D is stored in its place. There are no supernodes. * * simplicial LL': (is_ll TRUE, is_super FALSE). Uses the same storage * scheme as the simplicial LDL', except that D does not appear. * The first entry of each column of L is the diagonal entry of * that column of L. * * supernodal LDL': (is_ll FALSE, is_super TRUE). Not used. * FUTURE WORK: add support for supernodal LDL' * * supernodal LL': (is_ll TRUE, is_super TRUE). A supernodal factor, * using the supernodal components described above (nsuper, ssize, * xsize, maxcsize, maxesize, super, pi, px, s, x, and z). * * * Symbolic types (xtype is CHOLMOD_PATTERN) * ----------------------------------------- * * simplicial LDL': (is_ll FALSE, is_super FALSE). Nothing is present * except Perm and ColCount. * * simplicial LL': (is_ll TRUE, is_super FALSE). Identical to the * simplicial LDL', except for the is_ll flag. * * supernodal LDL': (is_ll FALSE, is_super TRUE). Not used. * FUTURE WORK: add support for supernodal LDL' * * supernodal LL': (is_ll TRUE, is_super TRUE). A supernodal symbolic * factorization. The simplicial symbolic information is present * (Perm and ColCount), as is all of the supernodal factorization * except for the numerical values (x and z). */ int itype ; /* The integer arrays are Perm, ColCount, p, i, nz, * next, prev, super, pi, px, and s. If itype is * CHOLMOD_INT, all of these are int arrays. * CHOLMOD_INTLONG: p, pi, px are SuiteSparse_long, others int. * CHOLMOD_LONG: all integer arrays are SuiteSparse_long. 
*/ int xtype ; /* pattern, real, complex, or zomplex */ int dtype ; /* x and z double or float */ } cholmod_factor ; // in ../../src/CHOLMOD/Include/cholmod_core.h skip forward to - line 1836 : /* A dense matrix in column-oriented form. It has no itype since it contains * no integers. Entry in row i and column j is located in x [i+j*d]. */ typedef struct cholmod_dense_struct { size_t nrow ; /* the matrix is nrow-by-ncol */ size_t ncol ; size_t nzmax ; /* maximum number of entries in the matrix */ size_t d ; /* leading dimension (d >= nrow must hold) */ void *x ; /* size nzmax or 2*nzmax, if present */ void *z ; /* size nzmax, if present */ int xtype ; /* pattern, real, complex, or zomplex */ int dtype ; /* x and z double or float */ } cholmod_dense ; // in ../../src/CHOLMOD/Include/cholmod_core.h skip forward to - line 2033 : /* A sparse matrix stored in triplet form. */ typedef struct cholmod_triplet_struct { size_t nrow ; /* the matrix is nrow-by-ncol */ size_t ncol ; size_t nzmax ; /* maximum number of entries in the matrix */ size_t nnz ; /* number of nonzeros in the matrix */ void *i ; /* i [0..nzmax-1], the row indices */ void *j ; /* j [0..nzmax-1], the column indices */ void *x ; /* size nzmax or 2*nzmax, if present */ void *z ; /* size nzmax, if present */ int stype ; /* Describes what parts of the matrix are considered: * * 0: matrix is "unsymmetric": use both upper and lower triangular parts * (the matrix may actually be symmetric in pattern and value, but * both parts are explicitly stored and used). May be square or * rectangular. * >0: matrix is square and symmetric. Entries in the lower triangular * part are transposed and added to the upper triangular part when * the matrix is converted to cholmod_sparse form. * <0: matrix is square and symmetric. Entries in the upper triangular * part are transposed and added to the lower triangular part when * the matrix is converted to cholmod_sparse form. * * Note that stype>0 and stype<0 are different for cholmod_sparse and * cholmod_triplet. The reason is simple. You can permute a symmetric * triplet matrix by simply replacing a row and column index with their * new row and column indices, via an inverse permutation. Suppose * P = L->Perm is your permutation, and Pinv is an array of size n. * Suppose a symmetric matrix A is represent by a triplet matrix T, with * entries only in the upper triangular part. Then the following code: * * Ti = T->i ; * Tj = T->j ; * for (k = 0 ; k < n ; k++) Pinv [P [k]] = k ; * for (k = 0 ; k < nz ; k++) Ti [k] = Pinv [Ti [k]] ; * for (k = 0 ; k < nz ; k++) Tj [k] = Pinv [Tj [k]] ; * * creates the triplet form of C=P*A*P'. However, if T initially * contains just the upper triangular entries (T->stype = 1), after * permutation it has entries in both the upper and lower triangular * parts. These entries should be transposed when constructing the * cholmod_sparse form of A, which is what cholmod_triplet_to_sparse * does. Thus: * * C = cholmod_triplet_to_sparse (T, 0, &Common) ; * * will return the matrix C = P*A*P'. * * Since the triplet matrix T is so simple to generate, it's quite easy * to remove entries that you do not want, prior to converting T to the * cholmod_sparse form. So if you include these entries in T, CHOLMOD * assumes that there must be a reason (such as the one above). Thus, * no entry in a triplet matrix is ever ignored. */ int itype ; /* CHOLMOD_LONG: i and j are SuiteSparse_long. 
Otherwise int */ int xtype ; /* pattern, real, complex, or zomplex */ int dtype ; /* x and z are double or float */ } cholmod_triplet ; // -------- our (Matrix) short and const_ forms of of the pointers : typedef cholmod_common* CHM_CM; typedef cholmod_dense* CHM_DN; typedef const cholmod_dense* const_CHM_DN; typedef cholmod_factor* CHM_FR; typedef const cholmod_factor* const_CHM_FR; typedef cholmod_sparse* CHM_SP; typedef const cholmod_sparse* const_CHM_SP; typedef cholmod_triplet* CHM_TR; typedef const cholmod_triplet* const_CHM_TR; // --------- Matrix ("M_") R ("R_") pkg routines "re-exported": --------------- int cholmod_start(CHM_CM); void R_cholmod_error(int status, const char *file, int line, const char *message); int cholmod_finish(CHM_CM); CHM_SP cholmod_allocate_sparse(size_t nrow, size_t ncol, size_t nzmax, int sorted, int packed, int stype, int xtype, CHM_CM); int cholmod_free_factor(CHM_FR *L, CHM_CM); int cholmod_free_dense(CHM_DN *A, CHM_CM); int cholmod_free_sparse(CHM_SP *A, CHM_CM); int cholmod_free_triplet(CHM_TR *T, CHM_CM); long int cholmod_nnz(const_CHM_SP, CHM_CM); CHM_SP cholmod_speye(size_t nrow, size_t ncol, int xtype, CHM_CM); CHM_SP cholmod_transpose(const_CHM_SP, int values, CHM_CM); int cholmod_sort(CHM_SP A, CHM_CM); CHM_SP cholmod_vertcat(const_CHM_SP, const_CHM_SP, int values, CHM_CM); CHM_SP cholmod_copy(const_CHM_SP, int stype, int mode, CHM_CM); CHM_SP cholmod_add(const_CHM_SP, const_CHM_SP, double alpha [2], double beta [2], int values, int sorted, CHM_CM); // from ../../src/CHOLMOD/Include/cholmod_cholesky.h - line 178 : #define CHOLMOD_A 0 /* solve Ax=b */ #define CHOLMOD_LDLt 1 /* solve LDL'x=b */ #define CHOLMOD_LD 2 /* solve LDx=b */ #define CHOLMOD_DLt 3 /* solve DL'x=b */ #define CHOLMOD_L 4 /* solve Lx=b */ #define CHOLMOD_Lt 5 /* solve L'x=b */ #define CHOLMOD_D 6 /* solve Dx=b */ #define CHOLMOD_P 7 /* permute x=Px */ #define CHOLMOD_Pt 8 /* permute x=P'x */ CHM_DN cholmod_solve(int, const_CHM_FR, const_CHM_DN, CHM_CM); CHM_SP cholmod_spsolve(int, const_CHM_FR, const_CHM_SP, CHM_CM); int cholmod_sdmult(const_CHM_SP, int, const double*, const double*, const_CHM_DN, CHM_DN Y, CHM_CM); CHM_SP cholmod_ssmult(const_CHM_SP, const_CHM_SP, int, int, int, CHM_CM); int cholmod_factorize(const_CHM_SP, CHM_FR L, CHM_CM); int cholmod_factorize_p(const_CHM_SP, double *beta, int *fset, size_t fsize, CHM_FR L, CHM_CM); CHM_SP cholmod_copy_sparse(const_CHM_SP, CHM_CM); CHM_DN cholmod_copy_dense(const_CHM_DN, CHM_CM); CHM_SP cholmod_aat(const_CHM_SP, int *fset, size_t fsize, int mode, CHM_CM); int cholmod_band_inplace(CHM_SP A, int k1, int k2, int mode, CHM_CM); CHM_SP cholmod_add(const_CHM_SP, const_CHM_SP, double alpha[2], double beta[2], int values, int sorted, CHM_CM); CHM_DN cholmod_allocate_dense(size_t nrow, size_t ncol, size_t d, int xtype, CHM_CM); CHM_FR cholmod_analyze(const_CHM_SP, CHM_CM); CHM_FR cholmod_analyze_p(const_CHM_SP, int *Perm, int *fset, size_t fsize, CHM_CM); int cholmod_change_factor(int to_xtype, int to_ll, int to_super, int to_packed, int to_monotonic, CHM_FR L, CHM_CM); CHM_FR cholmod_copy_factor(const_CHM_FR, CHM_CM); CHM_SP cholmod_factor_to_sparse(const_CHM_FR, CHM_CM); CHM_SP cholmod_dense_to_sparse(const_CHM_DN, int values, CHM_CM); int cholmod_defaults (CHM_CM); CHM_SP cholmod_triplet_to_sparse(const cholmod_triplet*, int nzmax, CHM_CM); CHM_SP cholmod_submatrix(const_CHM_SP, int *rset, int rsize, int *cset, int csize, int values, int sorted, CHM_CM); CHM_TR cholmod_sparse_to_triplet(const_CHM_SP, CHM_CM); CHM_DN 
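/* A minimal usage sketch of the routines declared above (illustrative only;
 * it assumes A is a symmetric positive definite const_CHM_SP and b is a
 * const_CHM_DN right-hand side):
 *
 *     cholmod_common c;
 *     cholmod_start(&c);
 *     CHM_FR L = cholmod_analyze(A, &c);              // symbolic analysis
 *     cholmod_factorize(A, L, &c);                    // numeric factorization
 *     CHM_DN x = cholmod_solve(CHOLMOD_A, L, b, &c);  // solve A x = b
 *     // ... use x ...
 *     cholmod_free_factor(&L, &c);
 *     cholmod_free_dense(&x, &c);
 *     cholmod_finish(&c);
 */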
cholmod_sparse_to_dense(const_CHM_SP, CHM_CM); CHM_TR cholmod_allocate_triplet (size_t nrow, size_t ncol, size_t nzmax, int stype, int xtype, CHM_CM); // from ../../src/CHOLMOD/Include/cholmod_matrixops.h - line 107 : /* scaling modes, selected by the scale input parameter: */ #define CHOLMOD_SCALAR 0 /* A = s*A */ #define CHOLMOD_ROW 1 /* A = diag(s)*A */ #define CHOLMOD_COL 2 /* A = A*diag(s) */ #define CHOLMOD_SYM 3 /* A = diag(s)*A*diag(s) */ int cholmod_scale(const_CHM_DN, int scale, CHM_SP, CHM_CM); // added in the Matrix package - the log of the determinant of the matrix that was factored double chm_factor_ldetL2(const_CHM_FR); #ifdef __cplusplus } #endif #endif /* RcppEigen_CHOLMOD_H */ RcppEigen/inst/include/RcppEigenStubs.h0000644000176200001440000004104512253717461017610 0ustar liggesusers// RcppEigenStubs.cpp: Provide access to compiled CHOLMOD functions in // the Matrix package. // // Copyright (C) 2011 Douglas Bates and Martin Maechler // // This file is part of RcppEigen. // // RcppEigen is free software: you can redistribute it and/or modify it // under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // RcppEigen is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with RcppEigen. If not, see . // Yes, this really is a C++ source file in an include directory. To // use the Cholmod support functions in RcppEigen you should create a // source file, say MyPackage/src/local_stubs.c that contains the // single line #include "RcppEigenStubs.cpp" #include #include #include #ifdef __cplusplus extern "C" { #endif inline CHM_DN as_cholmod_dense(CHM_DN ans, SEXP x) { static CHM_DN(*fun)(CHM_DN,SEXP) = NULL; if(fun == NULL) fun = (CHM_DN(*)(CHM_DN,SEXP)) R_GetCCallable("Matrix", "as_cholmod_dense"); return fun(ans, x); } inline CHM_FR as_cholmod_factor(CHM_FR ans, SEXP x) { static CHM_FR(*fun)(CHM_FR,SEXP) = NULL; if(fun == NULL) fun = (CHM_FR(*)(CHM_FR,SEXP)) R_GetCCallable("Matrix", "as_cholmod_factor"); return fun(ans, x); } inline CHM_SP as_cholmod_sparse(CHM_SP ans, SEXP x, Rboolean check_Udiag, Rboolean sort_in_place) { static CHM_SP(*fun)(CHM_SP,SEXP,Rboolean,Rboolean)= NULL; if(fun == NULL) fun = (CHM_SP(*)(CHM_SP,SEXP,Rboolean,Rboolean)) R_GetCCallable("Matrix", "as_cholmod_sparse"); return fun(ans, x, check_Udiag, sort_in_place); } inline CHM_TR as_cholmod_triplet(CHM_TR ans, SEXP x, Rboolean check_Udiag) { static CHM_TR(*fun)(CHM_TR,SEXP,Rboolean)= NULL; if(fun == NULL) fun = (CHM_TR(*)(CHM_TR,SEXP,Rboolean)) R_GetCCallable("Matrix", "as_cholmod_triplet"); return fun(ans, x, check_Udiag); } inline SEXP Csparse_diagU2N(SEXP x) { static SEXP(*fun)(SEXP) = NULL; if(fun == NULL) fun = (SEXP(*)(SEXP)) R_GetCCallable("Matrix", "Csparse_diagU2N"); return fun(x); } inline SEXP M_chm_factor_to_SEXP(const_CHM_FR f, int dofree) { static SEXP(*fun)(const_CHM_FR,int) = NULL; if(fun == NULL) fun = (SEXP(*)(const_CHM_FR,int)) R_GetCCallable("Matrix", "chm_factor_to_SEXP"); return fun(f, dofree); } inline double M_chm_factor_ldetL2(const_CHM_FR f) { static double(*fun)(const_CHM_FR) = NULL; if(fun == NULL) fun = (double(*)(const_CHM_FR)) R_GetCCallable("Matrix", "chm_factor_ldetL2"); return fun(f); } inline CHM_FR 
M_chm_factor_update(CHM_FR f, const_CHM_SP A, double mult) { static CHM_FR(*fun)(CHM_FR,const_CHM_SP,double) = NULL; if(fun == NULL) fun = (CHM_FR(*)(CHM_FR,const_CHM_SP,double)) R_GetCCallable("Matrix", "chm_factor_update"); return fun(f, A, mult); } inline SEXP M_chm_sparse_to_SEXP(const_CHM_SP a, int dofree, int uploT, int Rkind, const char *diag, SEXP dn) { static SEXP(*fun)(const_CHM_SP,int,int,int,const char*,SEXP) = NULL; if(fun == NULL) fun = (SEXP(*)(const_CHM_SP,int,int,int,const char*,SEXP)) R_GetCCallable("Matrix", "chm_sparse_to_SEXP"); return fun(a, dofree, uploT, Rkind, diag, dn); } inline SEXP M_chm_triplet_to_SEXP(const CHM_TR a, int dofree, int uploT, int Rkind, const char *diag, SEXP dn) { static SEXP(*fun)(const CHM_TR,int,int,int,const char*,SEXP) = NULL; if(fun == NULL) fun = (SEXP(*)(const CHM_TR,int,int,int,const char*,SEXP)) R_GetCCallable("Matrix", "chm_triplet_to_SEXP"); return fun(a, dofree, uploT, Rkind, diag, dn); } inline CHM_SP cholmod_aat(const_CHM_SP A, int *fset, size_t fsize, int mode, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,int*,size_t, int,CHM_CM) = NULL; if(fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,int*,size_t, int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_aat"); return fun(A, fset, fsize, mode, Common); } inline int M_cholmod_band_inplace(CHM_SP A, int k1, int k2, int mode, CHM_CM Common) { static int(*fun)(CHM_SP,int,int,int,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_SP,int,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_band_inplace"); return fun(A, k1, k2, mode, Common); } inline CHM_SP cholmod_add(const_CHM_SP A, const_CHM_SP B, double alpha[2], double beta[2], int values, int sorted, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,const_CHM_SP, double*,double*,int,int, CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,const_CHM_SP, double*,double*,int,int, CHM_CM)) R_GetCCallable("Matrix", "cholmod_add"); return fun(A, B, alpha, beta, values, sorted, Common); } inline CHM_DN cholmod_allocate_dense(size_t nrow, size_t ncol, size_t d, int xtype, CHM_CM Common) { static CHM_DN(*fun)(size_t,size_t,size_t, int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_DN(*)(size_t,size_t,size_t, int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_allocate_dense"); return fun(nrow, ncol, d, xtype, Common); } inline CHM_SP cholmod_allocate_sparse(size_t nrow, size_t ncol, size_t nzmax, int sorted, int packed, int stype, int xtype, CHM_CM Common) { static CHM_SP(*fun)(size_t,size_t,size_t,int,int, int,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*) (size_t,size_t,size_t,int,int,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_allocate_sparse"); return fun(nrow,ncol,nzmax,sorted,packed,stype,xtype,Common); } inline CHM_TR cholmod_allocate_triplet(size_t nrow, size_t ncol, size_t nzmax, int stype, int xtype, CHM_CM Common) { static CHM_TR(*fun)(size_t,size_t,size_t, int,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_TR(*)(size_t,size_t,size_t,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_allocate_triplet"); return fun(nrow,ncol,nzmax,stype,xtype,Common); } inline CHM_SP cholmod_triplet_to_sparse(const cholmod_triplet* T, int nzmax, CHM_CM Common) { static CHM_SP(*fun)(const cholmod_triplet*,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const cholmod_triplet*,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_triplet_to_sparse"); return fun(T, nzmax, Common); } inline CHM_TR cholmod_sparse_to_triplet(const_CHM_SP A, CHM_CM Common) { static CHM_TR(*fun)(const_CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = 
(CHM_TR(*)(const_CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_sparse_to_triplet"); return fun(A, Common); } inline CHM_DN cholmod_sparse_to_dense(const_CHM_SP A, CHM_CM Common) { static CHM_DN(*fun)(const_CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_DN(*)(const_CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_sparse_to_dense"); return fun(A, Common); } inline CHM_FR cholmod_analyze(const_CHM_SP A, CHM_CM Common) { static CHM_FR(*fun)(const_CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_FR(*)(const_CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_analyze"); return fun(A, Common); } inline CHM_FR cholmod_analyze_p(const_CHM_SP A, int *Perm, int *fset, size_t fsize, CHM_CM Common) { static CHM_FR(*fun)(const_CHM_SP,int*,int*,size_t, CHM_CM) = NULL; if (fun == NULL) fun = (CHM_FR(*)(const_CHM_SP,int*,int*, size_t,CHM_CM)) R_GetCCallable("Matrix", "cholmod_analyze_p"); return fun(A, Perm, fset, fsize, Common); } inline CHM_SP cholmod_copy(const_CHM_SP A, int stype, int mode, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,int,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_copy"); return fun(A, stype, mode, Common); } inline CHM_DN cholmod_copy_dense(const_CHM_DN A, CHM_CM Common) { static CHM_DN(*fun)(const_CHM_DN,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_DN(*)(const_CHM_DN,CHM_CM)) R_GetCCallable("Matrix", "cholmod_copy_dense"); return fun(A, Common); } inline CHM_FR cholmod_copy_factor(const_CHM_FR L, CHM_CM Common) { static CHM_FR(*fun)(const_CHM_FR,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_FR(*)(const_CHM_FR,CHM_CM)) R_GetCCallable("Matrix", "cholmod_copy_factor"); return fun(L, Common); } inline int cholmod_change_factor(int to_xtype, int to_ll, int to_super, int to_packed, int to_monotonic, CHM_FR L, CHM_CM Common) { static int(*fun)(int,int,int,int,int,CHM_FR,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(int,int,int,int,int,CHM_FR,CHM_CM)) R_GetCCallable("Matrix", "cholmod_change_factor"); return fun(to_xtype, to_ll, to_super, to_packed, to_monotonic, L, Common); } inline CHM_SP cholmod_copy_sparse(const_CHM_SP A, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_copy_sparse"); return fun(A, Common); } inline CHM_SP cholmod_factor_to_sparse(const_CHM_FR L, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_FR,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_FR,CHM_CM)) R_GetCCallable("Matrix", "cholmod_factor_to_sparse"); return fun(L, Common); } inline CHM_SP cholmod_submatrix(const_CHM_SP A, int *rset, int rsize, int *cset, int csize, int values, int sorted, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,int*,int,int*,int, int,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,int*,int,int*, int,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_submatrix"); return fun(A, rset, rsize, cset, csize, values, sorted, Common); } inline CHM_SP cholmod_dense_to_sparse(const_CHM_DN X, int values, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_DN,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_DN,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_dense_to_sparse"); return fun(X, values, Common); } inline int cholmod_factorize(const_CHM_SP A, CHM_FR L, CHM_CM Common) { static int(*fun)(const_CHM_SP,CHM_FR,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(const_CHM_SP,CHM_FR,CHM_CM)) R_GetCCallable("Matrix", "cholmod_factorize"); return fun(A, L, Common); } inline 
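/* Note: every stub in this file follows the same idiom -- the routine exported
 * by the Matrix package is looked up once via R_GetCCallable("Matrix", <name>),
 * the result is cached in a function-local static function pointer, and all
 * later calls reuse that pointer, so the lookup cost is paid only on first use. */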
int cholmod_factorize_p(const_CHM_SP A, double *beta, int *fset, size_t fsize, CHM_FR L, CHM_CM Common) { static int(*fun)(const_CHM_SP,double*,int*,size_t, CHM_FR,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(const_CHM_SP,double*,int*,size_t, CHM_FR,CHM_CM)) R_GetCCallable("Matrix", "cholmod_factorize_p"); return fun(A, beta, fset, fsize, L, Common); } inline int cholmod_finish(CHM_CM Common) { static int(*fun)(CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_CM)) R_GetCCallable("Matrix", "cholmod_finish"); return fun(Common); } inline int cholmod_sort(CHM_SP A, CHM_CM Common) { static int(*fun)(CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_sort"); return fun(A, Common); } inline int cholmod_free_dense(CHM_DN *A, CHM_CM Common) { static int(*fun)(CHM_DN*,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_DN*,CHM_CM)) R_GetCCallable("Matrix", "cholmod_free_dense"); return fun(A, Common); } inline int cholmod_free_factor(CHM_FR *L, CHM_CM Common) { static int(*fun)(CHM_FR*,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_FR*,CHM_CM)) R_GetCCallable("Matrix", "cholmod_free_factor"); return fun(L, Common); } inline int cholmod_free_sparse(CHM_SP *A, CHM_CM Common) { static int(*fun)(CHM_SP*,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_SP*,CHM_CM)) R_GetCCallable("Matrix", "cholmod_free_sparse"); return fun(A, Common); } inline int cholmod_free_triplet(cholmod_triplet **T, CHM_CM Common) { static int(*fun)(cholmod_triplet**,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(cholmod_triplet**,CHM_CM)) R_GetCCallable("Matrix", "cholmod_free_triplet"); return fun(T, Common); } inline long int cholmod_nnz(const_CHM_SP A, CHM_CM Common) { static long(*fun)(const_CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = (long(*)(const_CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_nnz"); return fun(A, Common); } inline int cholmod_sdmult(const_CHM_SP A, int transpose, const double *alpha, const double *beta, const_CHM_DN X, CHM_DN Y, CHM_CM Common) { static int(*fun)(const_CHM_SP,int,const double*, const double*,const_CHM_DN, CHM_DN,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(const_CHM_SP,int,const double*, const double*, const_CHM_DN, CHM_DN,CHM_CM)) R_GetCCallable("Matrix", "cholmod_sdmult"); return fun(A, transpose, alpha, beta, X, Y, Common); } inline CHM_SP cholmod_ssmult(const_CHM_SP A, const_CHM_SP B, int stype, int values, int sorted, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,const_CHM_SP, int,int,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,const_CHM_SP, int,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_ssmult"); return fun(A, B, stype, values, sorted, Common); } inline CHM_DN cholmod_solve(int sys, const_CHM_FR L, const_CHM_DN B, CHM_CM Common) { static CHM_DN(*fun)(int,const_CHM_FR,const_CHM_DN, CHM_CM) = NULL; if (fun == NULL) fun = (CHM_DN(*)(int,const_CHM_FR,const_CHM_DN, CHM_CM)) R_GetCCallable("Matrix", "cholmod_solve"); return fun(sys, L, B, Common); } inline CHM_SP cholmod_speye(size_t nrow, size_t ncol, int xtype, CHM_CM Common) { static CHM_SP(*fun)(size_t,size_t,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(size_t,size_t,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_speye"); return fun(nrow, ncol, xtype, Common); } inline CHM_SP cholmod_spsolve(int sys, const_CHM_FR L, const_CHM_SP B, CHM_CM Common) { static CHM_SP(*fun)(int,const_CHM_FR, const_CHM_SP, CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(int,const_CHM_FR, const_CHM_SP, CHM_CM)) R_GetCCallable("Matrix", 
"cholmod_spsolve"); return fun(sys, L, B, Common); } inline int cholmod_defaults (CHM_CM Common) { static int(*fun)(CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_CM)) R_GetCCallable("Matrix", "cholmod_defaults"); return fun(Common); } /* extern cholmod_common c; */ inline void R_cholmod_error(int status, const char *file, int line, const char *message) { /* NB: keep in sync with R_cholmod_error(), ../../src/chm_common.c */ if(status < 0) { /* Note: Matrix itself uses CHM_set_common_env, CHM_store_common * and CHM_restore_common to preserve settings through error calls. * Consider defining your own error handler, *and* possibly restoring * *your* version of the cholmod_common that *you* use. */ Rf_error("Cholmod error '%s' at file:%s, line %d", message, file, line); } else Rf_warning("Cholmod warning '%s' at file:%s, line %d", message, file, line); } inline int cholmod_start(CHM_CM Common) { int val; static int(*fun)(CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_CM)) R_GetCCallable("Matrix", "cholmod_start"); val = fun(Common); /*-- NB: keep in sync with R_cholmod_start() --> ../../src/chm_common.c */ /* do not allow CHOLMOD printing - currently */ Common->print_function = NULL;/* was R_cholmod_printf; /.* Rprintf gives warning */ /* Consider using your own error handler: */ Common->error_handler = R_cholmod_error; return val; } inline CHM_SP cholmod_transpose(const_CHM_SP A, int values, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_transpose"); return fun(A, values, Common); } inline CHM_SP cholmod_vertcat(const_CHM_SP A, const_CHM_SP B, int values, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,const_CHM_SP,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,const_CHM_SP, int, CHM_CM)) R_GetCCallable("Matrix", "cholmod_vertcat"); return fun(A, B, values, Common); } inline int cholmod_scale(const_CHM_DN S, int scale, CHM_SP A, CHM_CM Common) { static int(*fun)(const_CHM_DN,int,CHM_SP, CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(const_CHM_DN,int,CHM_SP, CHM_CM)) R_GetCCallable("Matrix", "cholmod_scale"); return fun(S, scale, A, Common); } #ifdef __cplusplus } #endif RcppEigen/inst/include/unsupported/0000755000176200001440000000000012253717461017126 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/0000755000176200001440000000000013100733213020136 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/AdolcForward0000644000176200001440000001022113100733213022424 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ADLOC_FORWARD #define EIGEN_ADLOC_FORWARD //-------------------------------------------------------------------------------- // // This file provides support for adolc's adouble type in forward mode. // ADOL-C is a C++ automatic differentiation library, // see https://projects.coin-or.org/ADOL-C for more information. // // Note that the maximal number of directions is controlled by // the preprocessor token NUMBER_DIRECTIONS. The default is 2. 
// //-------------------------------------------------------------------------------- #define ADOLC_TAPELESS #ifndef NUMBER_DIRECTIONS # define NUMBER_DIRECTIONS 2 #endif #include // adolc defines some very stupid macros: #if defined(malloc) # undef malloc #endif #if defined(calloc) # undef calloc #endif #if defined(realloc) # undef realloc #endif #include namespace Eigen { /** * \defgroup AdolcForward_Module Adolc forward module * This module provides support for adolc's adouble type in forward mode. * ADOL-C is a C++ automatic differentiation library, * see https://projects.coin-or.org/ADOL-C for more information. * It mainly consists in: * - a struct Eigen::NumTraits specialization * - overloads of internal::* math function for adtl::adouble type. * * Note that the maximal number of directions is controlled by * the preprocessor token NUMBER_DIRECTIONS. The default is 2. * * \code * #include * \endcode */ //@{ } // namespace Eigen // Eigen's require a few additional functions which must be defined in the same namespace // than the custom scalar type own namespace namespace adtl { inline const adouble& conj(const adouble& x) { return x; } inline const adouble& real(const adouble& x) { return x; } inline adouble imag(const adouble&) { return 0.; } inline adouble abs(const adouble& x) { return fabs(x); } inline adouble abs2(const adouble& x) { return x*x; } } namespace Eigen { template<> struct NumTraits : NumTraits { typedef adtl::adouble Real; typedef adtl::adouble NonInteger; typedef adtl::adouble Nested; enum { IsComplex = 0, IsInteger = 0, IsSigned = 1, RequireInitialization = 1, ReadCost = 1, AddCost = 1, MulCost = 1 }; }; template class AdolcForwardJacobian : public Functor { typedef adtl::adouble ActiveScalar; public: AdolcForwardJacobian() : Functor() {} AdolcForwardJacobian(const Functor& f) : Functor(f) {} // forward constructors template AdolcForwardJacobian(const T0& a0) : Functor(a0) {} template AdolcForwardJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {} template AdolcForwardJacobian(const T0& a0, const T1& a1, const T1& a2) : Functor(a0, a1, a2) {} typedef typename Functor::InputType InputType; typedef typename Functor::ValueType ValueType; typedef typename Functor::JacobianType JacobianType; typedef Matrix ActiveInput; typedef Matrix ActiveValue; void operator() (const InputType& x, ValueType* v, JacobianType* _jac) const { eigen_assert(v!=0); if (!_jac) { Functor::operator()(x, v); return; } JacobianType& jac = *_jac; ActiveInput ax = x.template cast(); ActiveValue av(jac.rows()); for (int j=0; j // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPLINES_MODULE_H #define EIGEN_SPLINES_MODULE_H namespace Eigen { /** * \defgroup Splines_Module Spline and spline fitting module * * This module provides a simple multi-dimensional spline class while * offering most basic functionality to fit a spline to point sets. * * \code * #include * \endcode */ } #include "src/Splines/SplineFwd.h" #include "src/Splines/Spline.h" #include "src/Splines/SplineFitting.h" #endif // EIGEN_SPLINES_MODULE_H RcppEigen/inst/include/unsupported/Eigen/NumericalDiff0000644000176200001440000000335512253717461022616 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2009 Thomas Capricelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NUMERICALDIFF_MODULE #define EIGEN_NUMERICALDIFF_MODULE #include namespace Eigen { /** * \defgroup NumericalDiff_Module Numerical differentiation module * * \code * #include * \endcode * * See http://en.wikipedia.org/wiki/Numerical_differentiation * * Warning : this should NOT be confused with automatic differentiation, which * is a different method and has its own module in Eigen : \ref * AutoDiff_Module. * * Currently only "Forward" and "Central" schemes are implemented. Those * are basic methods, and there exist some more elaborated way of * computing such approximates. They are implemented using both * proprietary and free software, and usually requires linking to an * external library. It is very easy for you to write a functor * using such software, and the purpose is quite orthogonal to what we * want to achieve with Eigen. * * This is why we will not provide wrappers for every great numerical * differentiation software that exist, but should rather stick with those * basic ones, that still are useful for testing. * * Also, the \ref NonLinearOptimization_Module needs this in order to * provide full features compatibility with the original (c)minpack * package. * */ } //@{ #include "src/NumericalDiff/NumericalDiff.h" //@} #endif // EIGEN_NUMERICALDIFF_MODULE RcppEigen/inst/include/unsupported/Eigen/src/0000755000176200001440000000000013100733213020725 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/Splines/0000755000176200001440000000000013100733213022342 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/Splines/SplineFitting.h0000644000176200001440000004002013100733213025266 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 20010-2011 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPLINE_FITTING_H #define EIGEN_SPLINE_FITTING_H #include #include #include #include #include "SplineFwd.h" #include #include namespace Eigen { /** * \brief Computes knot averages. * \ingroup Splines_Module * * The knots are computed as * \f{align*} * u_0 & = \hdots = u_p = 0 \\ * u_{m-p} & = \hdots = u_{m} = 1 \\ * u_{j+p} & = \frac{1}{p}\sum_{i=j}^{j+p-1}\bar{u}_i \quad\quad j=1,\hdots,n-p * \f} * where \f$p\f$ is the degree and \f$m+1\f$ the number knots * of the desired interpolating spline. * * \param[in] parameters The input parameters. During interpolation one for each data point. * \param[in] degree The spline degree which is used during the interpolation. * \param[out] knots The output knot vector. 
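 *
 * As a small worked example (values chosen here only for illustration): for
 * degree \f$p = 2\f$ and parameters \f$\bar{u} = (0, 1/4, 1/2, 3/4, 1)\f$ the
 * resulting knot vector is \f$(0, 0, 0, 3/8, 5/8, 1, 1, 1)\f$, since
 * \f$u_3 = (\bar{u}_1 + \bar{u}_2)/2\f$ and \f$u_4 = (\bar{u}_2 + \bar{u}_3)/2\f$.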
* * \sa Les Piegl and Wayne Tiller, The NURBS book (2nd ed.), 1997, 9.2.1 Global Curve Interpolation to Point Data **/ template void KnotAveraging(const KnotVectorType& parameters, DenseIndex degree, KnotVectorType& knots) { knots.resize(parameters.size()+degree+1); for (DenseIndex j=1; j void KnotAveragingWithDerivatives(const ParameterVectorType& parameters, const unsigned int degree, const IndexArray& derivativeIndices, KnotVectorType& knots) { typedef typename ParameterVectorType::Scalar Scalar; DenseIndex numParameters = parameters.size(); DenseIndex numDerivatives = derivativeIndices.size(); if (numDerivatives < 1) { KnotAveraging(parameters, degree, knots); return; } DenseIndex startIndex; DenseIndex endIndex; DenseIndex numInternalDerivatives = numDerivatives; if (derivativeIndices[0] == 0) { startIndex = 0; --numInternalDerivatives; } else { startIndex = 1; } if (derivativeIndices[numDerivatives - 1] == numParameters - 1) { endIndex = numParameters - degree; --numInternalDerivatives; } else { endIndex = numParameters - degree - 1; } // There are (endIndex - startIndex + 1) knots obtained from the averaging // and 2 for the first and last parameters. DenseIndex numAverageKnots = endIndex - startIndex + 3; KnotVectorType averageKnots(numAverageKnots); averageKnots[0] = parameters[0]; int newKnotIndex = 0; for (DenseIndex i = startIndex; i <= endIndex; ++i) averageKnots[++newKnotIndex] = parameters.segment(i, degree).mean(); averageKnots[++newKnotIndex] = parameters[numParameters - 1]; newKnotIndex = -1; ParameterVectorType temporaryParameters(numParameters + 1); KnotVectorType derivativeKnots(numInternalDerivatives); for (DenseIndex i = 0; i < numAverageKnots - 1; ++i) { temporaryParameters[0] = averageKnots[i]; ParameterVectorType parameterIndices(numParameters); int temporaryParameterIndex = 1; for (DenseIndex j = 0; j < numParameters; ++j) { Scalar parameter = parameters[j]; if (parameter >= averageKnots[i] && parameter < averageKnots[i + 1]) { parameterIndices[temporaryParameterIndex] = j; temporaryParameters[temporaryParameterIndex++] = parameter; } } temporaryParameters[temporaryParameterIndex] = averageKnots[i + 1]; for (int j = 0; j <= temporaryParameterIndex - 2; ++j) { for (DenseIndex k = 0; k < derivativeIndices.size(); ++k) { if (parameterIndices[j + 1] == derivativeIndices[k] && parameterIndices[j + 1] != 0 && parameterIndices[j + 1] != numParameters - 1) { derivativeKnots[++newKnotIndex] = temporaryParameters.segment(j, 3).mean(); break; } } } } KnotVectorType temporaryKnots(averageKnots.size() + derivativeKnots.size()); std::merge(averageKnots.data(), averageKnots.data() + averageKnots.size(), derivativeKnots.data(), derivativeKnots.data() + derivativeKnots.size(), temporaryKnots.data()); // Number of knots (one for each point and derivative) plus spline order. DenseIndex numKnots = numParameters + numDerivatives + degree + 1; knots.resize(numKnots); knots.head(degree).fill(temporaryKnots[0]); knots.tail(degree).fill(temporaryKnots.template tail<1>()[0]); knots.segment(degree, temporaryKnots.size()) = temporaryKnots; } /** * \brief Computes chord length parameters which are required for spline interpolation. * \ingroup Splines_Module * * \param[in] pts The data points to which a spline should be fit. * \param[out] chord_lengths The resulting chord lenggth vector. 
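 *
 * The parameters are the normalized cumulative chord lengths:
 * \f$\bar{u}_0 = 0\f$ and \f$\bar{u}_k = \bar{u}_{k-1} + \|P_k - P_{k-1}\| / d\f$
 * for \f$k \ge 1\f$, where \f$d\f$ is the total chord length, so that
 * \f$\bar{u}_{n-1} = 1\f$.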
* * \sa Les Piegl and Wayne Tiller, The NURBS book (2nd ed.), 1997, 9.2.1 Global Curve Interpolation to Point Data **/ template void ChordLengths(const PointArrayType& pts, KnotVectorType& chord_lengths) { typedef typename KnotVectorType::Scalar Scalar; const DenseIndex n = pts.cols(); // 1. compute the column-wise norms chord_lengths.resize(pts.cols()); chord_lengths[0] = 0; chord_lengths.rightCols(n-1) = (pts.array().leftCols(n-1) - pts.array().rightCols(n-1)).matrix().colwise().norm(); // 2. compute the partial sums std::partial_sum(chord_lengths.data(), chord_lengths.data()+n, chord_lengths.data()); // 3. normalize the data chord_lengths /= chord_lengths(n-1); chord_lengths(n-1) = Scalar(1); } /** * \brief Spline fitting methods. * \ingroup Splines_Module **/ template struct SplineFitting { typedef typename SplineType::KnotVectorType KnotVectorType; typedef typename SplineType::ParameterVectorType ParameterVectorType; /** * \brief Fits an interpolating Spline to the given data points. * * \param pts The points for which an interpolating spline will be computed. * \param degree The degree of the interpolating spline. * * \returns A spline interpolating the initially provided points. **/ template static SplineType Interpolate(const PointArrayType& pts, DenseIndex degree); /** * \brief Fits an interpolating Spline to the given data points. * * \param pts The points for which an interpolating spline will be computed. * \param degree The degree of the interpolating spline. * \param knot_parameters The knot parameters for the interpolation. * * \returns A spline interpolating the initially provided points. **/ template static SplineType Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters); /** * \brief Fits an interpolating spline to the given data points and * derivatives. * * \param points The points for which an interpolating spline will be computed. * \param derivatives The desired derivatives of the interpolating spline at interpolation * points. * \param derivativeIndices An array indicating which point each derivative belongs to. This * must be the same size as @a derivatives. * \param degree The degree of the interpolating spline. * * \returns A spline interpolating @a points with @a derivatives at those points. * * \sa Les A. Piegl, Khairan Rajab, Volha Smarodzinana. 2008. * Curve interpolation with directional constraints for engineering design. * Engineering with Computers **/ template static SplineType InterpolateWithDerivatives(const PointArrayType& points, const PointArrayType& derivatives, const IndexArray& derivativeIndices, const unsigned int degree); /** * \brief Fits an interpolating spline to the given data points and derivatives. * * \param points The points for which an interpolating spline will be computed. * \param derivatives The desired derivatives of the interpolating spline at interpolation points. * \param derivativeIndices An array indicating which point each derivative belongs to. This * must be the same size as @a derivatives. * \param degree The degree of the interpolating spline. * \param parameters The parameters corresponding to the interpolation points. * * \returns A spline interpolating @a points with @a derivatives at those points. * * \sa Les A. Piegl, Khairan Rajab, Volha Smarodzinana. 2008. * Curve interpolation with directional constraints for engineering design. 
* Engineering with Computers */ template static SplineType InterpolateWithDerivatives(const PointArrayType& points, const PointArrayType& derivatives, const IndexArray& derivativeIndices, const unsigned int degree, const ParameterVectorType& parameters); }; template template SplineType SplineFitting::Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters) { typedef typename SplineType::KnotVectorType::Scalar Scalar; typedef typename SplineType::ControlPointVectorType ControlPointVectorType; typedef Matrix MatrixType; KnotVectorType knots; KnotAveraging(knot_parameters, degree, knots); DenseIndex n = pts.cols(); MatrixType A = MatrixType::Zero(n,n); for (DenseIndex i=1; i qr(A); // Here, we are creating a temporary due to an Eigen issue. ControlPointVectorType ctrls = qr.solve(MatrixType(pts.transpose())).transpose(); return SplineType(knots, ctrls); } template template SplineType SplineFitting::Interpolate(const PointArrayType& pts, DenseIndex degree) { KnotVectorType chord_lengths; // knot parameters ChordLengths(pts, chord_lengths); return Interpolate(pts, degree, chord_lengths); } template template SplineType SplineFitting::InterpolateWithDerivatives(const PointArrayType& points, const PointArrayType& derivatives, const IndexArray& derivativeIndices, const unsigned int degree, const ParameterVectorType& parameters) { typedef typename SplineType::KnotVectorType::Scalar Scalar; typedef typename SplineType::ControlPointVectorType ControlPointVectorType; typedef Matrix MatrixType; const DenseIndex n = points.cols() + derivatives.cols(); KnotVectorType knots; KnotAveragingWithDerivatives(parameters, degree, derivativeIndices, knots); // fill matrix MatrixType A = MatrixType::Zero(n, n); // Use these dimensions for quicker populating, then transpose for solving. MatrixType b(points.rows(), n); DenseIndex startRow; DenseIndex derivativeStart; // End derivatives. 
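    // When a derivative is prescribed at the first (or last) data point, the
    // rows built below couple the two leading (or trailing) control points
    // through the end condition P_1 - P_0 = (u_{p+1} - u_0)/p * C'(u_0)
    // (and its mirror image at the other end); the interior rows are filled
    // from the basis functions and their first derivatives further down.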
if (derivativeIndices[0] == 0) { A.template block<1, 2>(1, 0) << -1, 1; Scalar y = (knots(degree + 1) - knots(0)) / degree; b.col(1) = y*derivatives.col(0); startRow = 2; derivativeStart = 1; } else { startRow = 1; derivativeStart = 0; } if (derivativeIndices[derivatives.cols() - 1] == points.cols() - 1) { A.template block<1, 2>(n - 2, n - 2) << -1, 1; Scalar y = (knots(knots.size() - 1) - knots(knots.size() - (degree + 2))) / degree; b.col(b.cols() - 2) = y*derivatives.col(derivatives.cols() - 1); } DenseIndex row = startRow; DenseIndex derivativeIndex = derivativeStart; for (DenseIndex i = 1; i < parameters.size() - 1; ++i) { const DenseIndex span = SplineType::Span(parameters[i], degree, knots); if (derivativeIndices[derivativeIndex] == i) { A.block(row, span - degree, 2, degree + 1) = SplineType::BasisFunctionDerivatives(parameters[i], 1, degree, knots); b.col(row++) = points.col(i); b.col(row++) = derivatives.col(derivativeIndex++); } else { A.row(row++).segment(span - degree, degree + 1) = SplineType::BasisFunctions(parameters[i], degree, knots); } } b.col(0) = points.col(0); b.col(b.cols() - 1) = points.col(points.cols() - 1); A(0,0) = 1; A(n - 1, n - 1) = 1; // Solve FullPivLU lu(A); ControlPointVectorType controlPoints = lu.solve(MatrixType(b.transpose())).transpose(); SplineType spline(knots, controlPoints); return spline; } template template SplineType SplineFitting::InterpolateWithDerivatives(const PointArrayType& points, const PointArrayType& derivatives, const IndexArray& derivativeIndices, const unsigned int degree) { ParameterVectorType parameters; ChordLengths(points, parameters); return InterpolateWithDerivatives(points, derivatives, derivativeIndices, degree, parameters); } } #endif // EIGEN_SPLINE_FITTING_H RcppEigen/inst/include/unsupported/Eigen/src/Splines/SplineFwd.h0000644000176200001440000001031413100733213024405 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 20010-2011 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPLINES_FWD_H #define EIGEN_SPLINES_FWD_H #include namespace Eigen { template class Spline; template < typename SplineType, int DerivativeOrder = Dynamic > struct SplineTraits {}; /** * \ingroup Splines_Module * \brief Compile-time attributes of the Spline class for Dynamic degree. **/ template struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, Dynamic > { typedef _Scalar Scalar; /*!< The spline curve's scalar type. */ enum { Dimension = _Dim /*!< The spline curve's dimension. */ }; enum { Degree = _Degree /*!< The spline curve's degree. */ }; enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ }; enum { NumOfDerivativesAtCompileTime = OrderAtCompileTime /*!< The number of derivatives defined for the current spline. */ }; enum { DerivativeMemoryLayout = Dimension==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ }; /** \brief The data type used to store non-zero basis functions. */ typedef Array BasisVectorType; /** \brief The data type used to store the values of the basis function derivatives. */ typedef Array BasisDerivativeType; /** \brief The data type used to store the spline's derivative values. */ typedef Array DerivativeType; /** \brief The point type the spline is representing. 
*/ typedef Array PointType; /** \brief The data type used to store knot vectors. */ typedef Array KnotVectorType; /** \brief The data type used to store parameter vectors. */ typedef Array ParameterVectorType; /** \brief The data type representing the spline's control points. */ typedef Array ControlPointVectorType; }; /** * \ingroup Splines_Module * \brief Compile-time attributes of the Spline class for fixed degree. * * The traits class inherits all attributes from the SplineTraits of Dynamic degree. **/ template < typename _Scalar, int _Dim, int _Degree, int _DerivativeOrder > struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, _DerivativeOrder > : public SplineTraits< Spline<_Scalar, _Dim, _Degree> > { enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ }; enum { NumOfDerivativesAtCompileTime = _DerivativeOrder==Dynamic ? Dynamic : _DerivativeOrder+1 /*!< The number of derivatives defined for the current spline. */ }; enum { DerivativeMemoryLayout = _Dim==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ }; /** \brief The data type used to store the values of the basis function derivatives. */ typedef Array<_Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType; /** \brief The data type used to store the spline's derivative values. */ typedef Array<_Scalar,_Dim,Dynamic,DerivativeMemoryLayout,_Dim,NumOfDerivativesAtCompileTime> DerivativeType; }; /** \brief 2D float B-spline with dynamic degree. */ typedef Spline Spline2f; /** \brief 3D float B-spline with dynamic degree. */ typedef Spline Spline3f; /** \brief 2D double B-spline with dynamic degree. */ typedef Spline Spline2d; /** \brief 3D double B-spline with dynamic degree. */ typedef Spline Spline3d; } #endif // EIGEN_SPLINES_FWD_H RcppEigen/inst/include/unsupported/Eigen/src/Splines/Spline.h0000644000176200001440000004416413100733213023756 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 20010-2011 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPLINE_H #define EIGEN_SPLINE_H #include "SplineFwd.h" namespace Eigen { /** * \ingroup Splines_Module * \class Spline * \brief A class representing multi-dimensional spline curves. * * The class represents B-splines with non-uniform knot vectors. Each control * point of the B-spline is associated with a basis function * \f{align*} * C(u) & = \sum_{i=0}^{n}N_{i,p}(u)P_i * \f} * * \tparam _Scalar The underlying data type (typically float or double) * \tparam _Dim The curve dimension (e.g. 2 or 3) * \tparam _Degree Per default set to Dynamic; could be set to the actual desired * degree for optimization purposes (would result in stack allocation * of several temporary variables). **/ template class Spline { public: typedef _Scalar Scalar; /*!< The spline curve's scalar type. */ enum { Dimension = _Dim /*!< The spline curve's dimension. */ }; enum { Degree = _Degree /*!< The spline curve's degree. */ }; /** \brief The point type the spline is representing. */ typedef typename SplineTraits::PointType PointType; /** \brief The data type used to store knot vectors. */ typedef typename SplineTraits::KnotVectorType KnotVectorType; /** \brief The data type used to store parameter vectors. 
*/ typedef typename SplineTraits::ParameterVectorType ParameterVectorType; /** \brief The data type used to store non-zero basis functions. */ typedef typename SplineTraits::BasisVectorType BasisVectorType; /** \brief The data type used to store the values of the basis function derivatives. */ typedef typename SplineTraits::BasisDerivativeType BasisDerivativeType; /** \brief The data type representing the spline's control points. */ typedef typename SplineTraits::ControlPointVectorType ControlPointVectorType; /** * \brief Creates a (constant) zero spline. * For Splines with dynamic degree, the resulting degree will be 0. **/ Spline() : m_knots(1, (Degree==Dynamic ? 2 : 2*Degree+2)) , m_ctrls(ControlPointVectorType::Zero(Dimension,(Degree==Dynamic ? 1 : Degree+1))) { // in theory this code can go to the initializer list but it will get pretty // much unreadable ... enum { MinDegree = (Degree==Dynamic ? 0 : Degree) }; m_knots.template segment(0) = Array::Zero(); m_knots.template segment(MinDegree+1) = Array::Ones(); } /** * \brief Creates a spline from a knot vector and control points. * \param knots The spline's knot vector. * \param ctrls The spline's control point vector. **/ template Spline(const OtherVectorType& knots, const OtherArrayType& ctrls) : m_knots(knots), m_ctrls(ctrls) {} /** * \brief Copy constructor for splines. * \param spline The input spline. **/ template Spline(const Spline& spline) : m_knots(spline.knots()), m_ctrls(spline.ctrls()) {} /** * \brief Returns the knots of the underlying spline. **/ const KnotVectorType& knots() const { return m_knots; } /** * \brief Returns the ctrls of the underlying spline. **/ const ControlPointVectorType& ctrls() const { return m_ctrls; } /** * \brief Returns the spline value at a given site \f$u\f$. * * The function returns * \f{align*} * C(u) & = \sum_{i=0}^{n}N_{i,p}P_i * \f} * * \param u Parameter \f$u \in [0;1]\f$ at which the spline is evaluated. * \return The spline value at the given location \f$u\f$. **/ PointType operator()(Scalar u) const; /** * \brief Evaluation of spline derivatives of up-to given order. * * The function returns * \f{align*} * \frac{d^i}{du^i}C(u) & = \sum_{i=0}^{n} \frac{d^i}{du^i} N_{i,p}(u)P_i * \f} * for i ranging between 0 and order. * * \param u Parameter \f$u \in [0;1]\f$ at which the spline derivative is evaluated. * \param order The order up to which the derivatives are computed. **/ typename SplineTraits::DerivativeType derivatives(Scalar u, DenseIndex order) const; /** * \copydoc Spline::derivatives * Using the template version of this function is more efficieent since * temporary objects are allocated on the stack whenever this is possible. **/ template typename SplineTraits::DerivativeType derivatives(Scalar u, DenseIndex order = DerivativeOrder) const; /** * \brief Computes the non-zero basis functions at the given site. * * Splines have local support and a point from their image is defined * by exactly \f$p+1\f$ control points \f$P_i\f$ where \f$p\f$ is the * spline degree. * * This function computes the \f$p+1\f$ non-zero basis function values * for a given parameter value \f$u\f$. It returns * \f{align*}{ * N_{i,p}(u), \hdots, N_{i+p+1,p}(u) * \f} * * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis functions * are computed. **/ typename SplineTraits::BasisVectorType basisFunctions(Scalar u) const; /** * \brief Computes the non-zero spline basis function derivatives up to given order. 
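     *
     * A short usage sketch (not from the original documentation; the data are
     * illustrative): these basis derivatives are what derivatives() combines
     * with the control points, so in practice one usually calls the
     * higher-level interpolation and evaluation API.
     * \code
     * Eigen::MatrixXd pts(2, 6);
     * pts << 0, 1, 2, 3, 4, 5,
     *        0, 1, 0, 1, 0, 1;
     * Eigen::Spline2d s = Eigen::SplineFitting<Eigen::Spline2d>::Interpolate(pts, 3);
     * Eigen::Spline2d::PointType p = s(0.5);     // curve point at u = 0.5
     * Eigen::Array2Xd d = s.derivatives(0.5, 1); // columns: value, first derivative
     * \endcode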
* * The function computes * \f{align*}{ * \frac{d^i}{du^i} N_{i,p}(u), \hdots, \frac{d^i}{du^i} N_{i+p+1,p}(u) * \f} * with i ranging from 0 up to the specified order. * * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis function * derivatives are computed. * \param order The order up to which the basis function derivatives are computes. **/ typename SplineTraits::BasisDerivativeType basisFunctionDerivatives(Scalar u, DenseIndex order) const; /** * \copydoc Spline::basisFunctionDerivatives * Using the template version of this function is more efficieent since * temporary objects are allocated on the stack whenever this is possible. **/ template typename SplineTraits::BasisDerivativeType basisFunctionDerivatives(Scalar u, DenseIndex order = DerivativeOrder) const; /** * \brief Returns the spline degree. **/ DenseIndex degree() const; /** * \brief Returns the span within the knot vector in which u is falling. * \param u The site for which the span is determined. **/ DenseIndex span(Scalar u) const; /** * \brief Computes the spang within the provided knot vector in which u is falling. **/ static DenseIndex Span(typename SplineTraits::Scalar u, DenseIndex degree, const typename SplineTraits::KnotVectorType& knots); /** * \brief Returns the spline's non-zero basis functions. * * The function computes and returns * \f{align*}{ * N_{i,p}(u), \hdots, N_{i+p+1,p}(u) * \f} * * \param u The site at which the basis functions are computed. * \param degree The degree of the underlying spline. * \param knots The underlying spline's knot vector. **/ static BasisVectorType BasisFunctions(Scalar u, DenseIndex degree, const KnotVectorType& knots); /** * \copydoc Spline::basisFunctionDerivatives * \param degree The degree of the underlying spline * \param knots The underlying spline's knot vector. **/ static BasisDerivativeType BasisFunctionDerivatives( const Scalar u, const DenseIndex order, const DenseIndex degree, const KnotVectorType& knots); private: KnotVectorType m_knots; /*!< Knot vector. */ ControlPointVectorType m_ctrls; /*!< Control points. */ template static void BasisFunctionDerivativesImpl( const typename Spline<_Scalar, _Dim, _Degree>::Scalar u, const DenseIndex order, const DenseIndex p, const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& U, DerivativeType& N_); }; template DenseIndex Spline<_Scalar, _Dim, _Degree>::Span( typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::Scalar u, DenseIndex degree, const typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::KnotVectorType& knots) { // Piegl & Tiller, "The NURBS Book", A2.1 (p. 
68) if (u <= knots(0)) return degree; const Scalar* pos = std::upper_bound(knots.data()+degree-1, knots.data()+knots.size()-degree-1, u); return static_cast( std::distance(knots.data(), pos) - 1 ); } template typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType Spline<_Scalar, _Dim, _Degree>::BasisFunctions( typename Spline<_Scalar, _Dim, _Degree>::Scalar u, DenseIndex degree, const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots) { typedef typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType BasisVectorType; const DenseIndex p = degree; const DenseIndex i = Spline::Span(u, degree, knots); const KnotVectorType& U = knots; BasisVectorType left(p+1); left(0) = Scalar(0); BasisVectorType right(p+1); right(0) = Scalar(0); VectorBlock(left,1,p) = u - VectorBlock(U,i+1-p,p).reverse(); VectorBlock(right,1,p) = VectorBlock(U,i+1,p) - u; BasisVectorType N(1,p+1); N(0) = Scalar(1); for (DenseIndex j=1; j<=p; ++j) { Scalar saved = Scalar(0); for (DenseIndex r=0; r DenseIndex Spline<_Scalar, _Dim, _Degree>::degree() const { if (_Degree == Dynamic) return m_knots.size() - m_ctrls.cols() - 1; else return _Degree; } template DenseIndex Spline<_Scalar, _Dim, _Degree>::span(Scalar u) const { return Spline::Span(u, degree(), knots()); } template typename Spline<_Scalar, _Dim, _Degree>::PointType Spline<_Scalar, _Dim, _Degree>::operator()(Scalar u) const { enum { Order = SplineTraits::OrderAtCompileTime }; const DenseIndex span = this->span(u); const DenseIndex p = degree(); const BasisVectorType basis_funcs = basisFunctions(u); const Replicate ctrl_weights(basis_funcs); const Block ctrl_pts(ctrls(),0,span-p,Dimension,p+1); return (ctrl_weights * ctrl_pts).rowwise().sum(); } /* --------------------------------------------------------------------------------------------- */ template void derivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& der) { enum { Dimension = SplineTraits::Dimension }; enum { Order = SplineTraits::OrderAtCompileTime }; enum { DerivativeOrder = DerivativeType::ColsAtCompileTime }; typedef typename SplineTraits::ControlPointVectorType ControlPointVectorType; typedef typename SplineTraits::BasisDerivativeType BasisDerivativeType; typedef typename BasisDerivativeType::ConstRowXpr BasisDerivativeRowXpr; const DenseIndex p = spline.degree(); const DenseIndex span = spline.span(u); const DenseIndex n = (std::min)(p, order); der.resize(Dimension,n+1); // Retrieve the basis function derivatives up to the desired order... const BasisDerivativeType basis_func_ders = spline.template basisFunctionDerivatives(u, n+1); // ... and perform the linear combinations of the control points. 
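    // For each derivative order, the p+1 control points active on the current
    // knot span are weighted by the corresponding row of basis function
    // derivatives and summed to produce one column of the result.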
for (DenseIndex der_order=0; der_order ctrl_weights( basis_func_ders.row(der_order) ); const Block ctrl_pts(spline.ctrls(),0,span-p,Dimension,p+1); der.col(der_order) = (ctrl_weights * ctrl_pts).rowwise().sum(); } } template typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::DerivativeType Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const { typename SplineTraits< Spline >::DerivativeType res; derivativesImpl(*this, u, order, res); return res; } template template typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::DerivativeType Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const { typename SplineTraits< Spline, DerivativeOrder >::DerivativeType res; derivativesImpl(*this, u, order, res); return res; } template typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisVectorType Spline<_Scalar, _Dim, _Degree>::basisFunctions(Scalar u) const { return Spline::BasisFunctions(u, degree(), knots()); } /* --------------------------------------------------------------------------------------------- */ template template void Spline<_Scalar, _Dim, _Degree>::BasisFunctionDerivativesImpl( const typename Spline<_Scalar, _Dim, _Degree>::Scalar u, const DenseIndex order, const DenseIndex p, const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& U, DerivativeType& N_) { typedef Spline<_Scalar, _Dim, _Degree> SplineType; enum { Order = SplineTraits::OrderAtCompileTime }; typedef typename SplineTraits::Scalar Scalar; typedef typename SplineTraits::BasisVectorType BasisVectorType; const DenseIndex span = SplineType::Span(u, p, U); const DenseIndex n = (std::min)(p, order); N_.resize(n+1, p+1); BasisVectorType left = BasisVectorType::Zero(p+1); BasisVectorType right = BasisVectorType::Zero(p+1); Matrix ndu(p+1,p+1); Scalar saved, temp; // FIXME These were double instead of Scalar. Was there a reason for that? ndu(0,0) = 1.0; DenseIndex j; for (j=1; j<=p; ++j) { left[j] = u-U[span+1-j]; right[j] = U[span+j]-u; saved = 0.0; for (DenseIndex r=0; r(saved+right[r+1] * temp); saved = left[j-r] * temp; } ndu(j,j) = static_cast(saved); } for (j = p; j>=0; --j) N_(0,j) = ndu(j,p); // Compute the derivatives DerivativeType a(n+1,p+1); DenseIndex r=0; for (; r<=p; ++r) { DenseIndex s1,s2; s1 = 0; s2 = 1; // alternate rows in array a a(0,0) = 1.0; // Compute the k-th derivative for (DenseIndex k=1; k<=static_cast(n); ++k) { Scalar d = 0.0; DenseIndex rk,pk,j1,j2; rk = r-k; pk = p-k; if (r>=k) { a(s2,0) = a(s1,0)/ndu(pk+1,rk); d = a(s2,0)*ndu(rk,pk); } if (rk>=-1) j1 = 1; else j1 = -rk; if (r-1 <= pk) j2 = k-1; else j2 = p-r; for (j=j1; j<=j2; ++j) { a(s2,j) = (a(s1,j)-a(s1,j-1))/ndu(pk+1,rk+j); d += a(s2,j)*ndu(rk+j,pk); } if (r<=pk) { a(s2,k) = -a(s1,k-1)/ndu(pk+1,r); d += a(s2,k)*ndu(r,pk); } N_(k,r) = static_cast(d); j = s1; s1 = s2; s2 = j; // Switch rows } } /* Multiply through by the correct factors */ /* (Eq. 
[2.9]) */ r = p; for (DenseIndex k=1; k<=static_cast(n); ++k) { for (j=p; j>=0; --j) N_(k,j) *= r; r *= p-k; } } template typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const { typename SplineTraits >::BasisDerivativeType der; BasisFunctionDerivativesImpl(u, order, degree(), knots(), der); return der; } template template typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const { typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType der; BasisFunctionDerivativesImpl(u, order, degree(), knots(), der); return der; } template typename SplineTraits >::BasisDerivativeType Spline<_Scalar, _Dim, _Degree>::BasisFunctionDerivatives( const typename Spline<_Scalar, _Dim, _Degree>::Scalar u, const DenseIndex order, const DenseIndex degree, const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots) { typename SplineTraits::BasisDerivativeType der; BasisFunctionDerivativesImpl(u, order, degree, knots, der); return der; } } #endif // EIGEN_SPLINE_H RcppEigen/inst/include/unsupported/Eigen/src/Eigenvalues/0000755000176200001440000000000013100733213023174 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h0000644000176200001440000007220213100733213031037 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 David Harmon // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 3 of the License, or (at your option) any later version. // // Alternatively, you can redistribute it and/or // modify it under the terms of the GNU General Public License as // published by the Free Software Foundation; either version 2 of // the License, or (at your option) any later version. // // Eigen is distributed in the hope that it will be useful, but WITHOUT ANY // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS // FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the // GNU General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License and a copy of the GNU General Public License along with // Eigen. If not, see . #ifndef EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H #define EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H #include namespace Eigen { namespace internal { template struct arpack_wrapper; template struct OP; } template, bool BisSPD=false> class ArpackGeneralizedSelfAdjointEigenSolver { public: //typedef typename MatrixSolver::MatrixType MatrixType; /** \brief Scalar type for matrices of type \p MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; /** \brief Real scalar type for \p MatrixType. * * This is just \c Scalar if #Scalar is real (e.g., \c float or * \c Scalar), and the type of the real part of \c Scalar if #Scalar is * complex. */ typedef typename NumTraits::Real RealScalar; /** \brief Type for vector of eigenvalues as returned by eigenvalues(). * * This is a column vector with entries of type #RealScalar. * The length of the vector is the size of \p nbrEigenvalues. 
*/ typedef typename internal::plain_col_type::type RealVectorType; /** \brief Default constructor. * * The default constructor is for cases in which the user intends to * perform decompositions via compute(). * */ ArpackGeneralizedSelfAdjointEigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_nbrConverged(0), m_nbrIterations(0) { } /** \brief Constructor; computes generalized eigenvalues of given matrix with respect to another matrix. * * \param[in] A Self-adjoint matrix whose eigenvalues / eigenvectors will * computed. By default, the upper triangular part is used, but can be changed * through the template parameter. * \param[in] B Self-adjoint matrix for the generalized eigenvalue problem. * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. * Must be less than the size of the input matrix, or an error is returned. * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with * respective meanings to find the largest magnitude , smallest magnitude, * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this * value can contain floating point value in string form, in which case the * eigenvalues closest to this value will be found. * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which * means machine precision. * * This constructor calls compute(const MatrixType&, const MatrixType&, Index, string, int, RealScalar) * to compute the eigenvalues of the matrix \p A with respect to \p B. The eigenvectors are computed if * \p options equals #ComputeEigenvectors. * */ ArpackGeneralizedSelfAdjointEigenSolver(const MatrixType& A, const MatrixType& B, Index nbrEigenvalues, std::string eigs_sigma="LM", int options=ComputeEigenvectors, RealScalar tol=0.0) : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_nbrConverged(0), m_nbrIterations(0) { compute(A, B, nbrEigenvalues, eigs_sigma, options, tol); } /** \brief Constructor; computes eigenvalues of given matrix. * * \param[in] A Self-adjoint matrix whose eigenvalues / eigenvectors will * computed. By default, the upper triangular part is used, but can be changed * through the template parameter. * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. * Must be less than the size of the input matrix, or an error is returned. * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with * respective meanings to find the largest magnitude , smallest magnitude, * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this * value can contain floating point value in string form, in which case the * eigenvalues closest to this value will be found. * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which * means machine precision. * * This constructor calls compute(const MatrixType&, Index, string, int, RealScalar) * to compute the eigenvalues of the matrix \p A. The eigenvectors are computed if * \p options equals #ComputeEigenvectors. 
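   *
   * A hedged usage sketch (not from the original documentation; it assumes the
   * unsupported ArpackSupport header is available, that the external ARPACK
   * library is linked, and the matrix below is a made-up SPD example):
   * \code
   * #include <unsupported/Eigen/ArpackSupport>
   * int n = 100;
   * Eigen::SparseMatrix<double> A(n, n);
   * for (int i = 0; i < n; ++i) A.insert(i, i) = i + 1.0;  // simple diagonal SPD matrix
   * A.makeCompressed();
   * Eigen::ArpackGeneralizedSelfAdjointEigenSolver<Eigen::SparseMatrix<double> >
   *     eigs(A, 5);                                        // 5 largest-magnitude eigenvalues ("LM")
   * if (eigs.info() == Eigen::Success)
   *   std::cout << eigs.eigenvalues().transpose() << std::endl;
   * \endcode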
* */ ArpackGeneralizedSelfAdjointEigenSolver(const MatrixType& A, Index nbrEigenvalues, std::string eigs_sigma="LM", int options=ComputeEigenvectors, RealScalar tol=0.0) : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_nbrConverged(0), m_nbrIterations(0) { compute(A, nbrEigenvalues, eigs_sigma, options, tol); } /** \brief Computes generalized eigenvalues / eigenvectors of given matrix using the external ARPACK library. * * \param[in] A Selfadjoint matrix whose eigendecomposition is to be computed. * \param[in] B Selfadjoint matrix for generalized eigenvalues. * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. * Must be less than the size of the input matrix, or an error is returned. * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with * respective meanings to find the largest magnitude , smallest magnitude, * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this * value can contain floating point value in string form, in which case the * eigenvalues closest to this value will be found. * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which * means machine precision. * * \returns Reference to \c *this * * This function computes the generalized eigenvalues of \p A with respect to \p B using ARPACK. The eigenvalues() * function can be used to retrieve them. If \p options equals #ComputeEigenvectors, * then the eigenvectors are also computed and can be retrieved by * calling eigenvectors(). * */ ArpackGeneralizedSelfAdjointEigenSolver& compute(const MatrixType& A, const MatrixType& B, Index nbrEigenvalues, std::string eigs_sigma="LM", int options=ComputeEigenvectors, RealScalar tol=0.0); /** \brief Computes eigenvalues / eigenvectors of given matrix using the external ARPACK library. * * \param[in] A Selfadjoint matrix whose eigendecomposition is to be computed. * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. * Must be less than the size of the input matrix, or an error is returned. * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with * respective meanings to find the largest magnitude , smallest magnitude, * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this * value can contain floating point value in string form, in which case the * eigenvalues closest to this value will be found. * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which * means machine precision. * * \returns Reference to \c *this * * This function computes the eigenvalues of \p A using ARPACK. The eigenvalues() * function can be used to retrieve them. If \p options equals #ComputeEigenvectors, * then the eigenvectors are also computed and can be retrieved by * calling eigenvectors(). * */ ArpackGeneralizedSelfAdjointEigenSolver& compute(const MatrixType& A, Index nbrEigenvalues, std::string eigs_sigma="LM", int options=ComputeEigenvectors, RealScalar tol=0.0); /** \brief Returns the eigenvectors of given matrix. * * \returns A const reference to the matrix whose columns are the eigenvectors. * * \pre The eigenvectors have been computed before. * * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). 
The * eigenvectors are normalized to have (Euclidean) norm equal to one. If * this object was used to solve the eigenproblem for the selfadjoint * matrix \f$ A \f$, then the matrix returned by this function is the * matrix \f$ V \f$ in the eigendecomposition \f$ A V = D V \f$. * For the generalized eigenproblem, the matrix returned is the solution \f$ A V = D B V \f$ * * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out * * \sa eigenvalues() */ const Matrix& eigenvectors() const { eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec; } /** \brief Returns the eigenvalues of given matrix. * * \returns A const reference to the column vector containing the eigenvalues. * * \pre The eigenvalues have been computed before. * * The eigenvalues are repeated according to their algebraic multiplicity, * so there are as many eigenvalues as rows in the matrix. The eigenvalues * are sorted in increasing order. * * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out * * \sa eigenvectors(), MatrixBase::eigenvalues() */ const Matrix& eigenvalues() const { eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized."); return m_eivalues; } /** \brief Computes the positive-definite square root of the matrix. * * \returns the positive-definite square root of the matrix * * \pre The eigenvalues and eigenvectors of a positive-definite matrix * have been computed before. * * The square root of a positive-definite matrix \f$ A \f$ is the * positive-definite matrix whose square equals \f$ A \f$. This function * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$. * * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out * * \sa operatorInverseSqrt(), * \ref MatrixFunctions_Module "MatrixFunctions Module" */ Matrix operatorSqrt() const { eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint(); } /** \brief Computes the inverse square root of the matrix. * * \returns the inverse positive-definite square root of the matrix * * \pre The eigenvalues and eigenvectors of a positive-definite matrix * have been computed before. * * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is * cheaper than first computing the square root with operatorSqrt() and * then its inverse with MatrixBase::inverse(). 
* * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out * * \sa operatorSqrt(), MatrixBase::inverse(), * \ref MatrixFunctions_Module "MatrixFunctions Module" */ Matrix operatorInverseSqrt() const { eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, \c NoConvergence otherwise. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized."); return m_info; } size_t getNbrConvergedEigenValues() const { return m_nbrConverged; } size_t getNbrIterations() const { return m_nbrIterations; } protected: Matrix m_eivec; Matrix m_eivalues; ComputationInfo m_info; bool m_isInitialized; bool m_eigenvectorsOk; size_t m_nbrConverged; size_t m_nbrIterations; }; template ArpackGeneralizedSelfAdjointEigenSolver& ArpackGeneralizedSelfAdjointEigenSolver ::compute(const MatrixType& A, Index nbrEigenvalues, std::string eigs_sigma, int options, RealScalar tol) { MatrixType B(0,0); compute(A, B, nbrEigenvalues, eigs_sigma, options, tol); return *this; } template ArpackGeneralizedSelfAdjointEigenSolver& ArpackGeneralizedSelfAdjointEigenSolver ::compute(const MatrixType& A, const MatrixType& B, Index nbrEigenvalues, std::string eigs_sigma, int options, RealScalar tol) { eigen_assert(A.cols() == A.rows()); eigen_assert(B.cols() == B.rows()); eigen_assert(B.rows() == 0 || A.cols() == B.rows()); eigen_assert((options &~ (EigVecMask | GenEigMask)) == 0 && (options & EigVecMask) != EigVecMask && "invalid option parameter"); bool isBempty = (B.rows() == 0) || (B.cols() == 0); // For clarity, all parameters match their ARPACK name // // Always 0 on the first call // int ido = 0; int n = (int)A.cols(); // User options: "LA", "SA", "SM", "LM", "BE" // char whch[3] = "LM"; // Specifies the shift if iparam[6] = { 3, 4, 5 }, not used if iparam[6] = { 1, 2 } // RealScalar sigma = 0.0; if (eigs_sigma.length() >= 2 && isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1])) { eigs_sigma[0] = toupper(eigs_sigma[0]); eigs_sigma[1] = toupper(eigs_sigma[1]); // In the following special case we're going to invert the problem, since solving // for larger magnitude is much much faster // i.e., if 'SM' is specified, we're going to really use 'LM', the default // if (eigs_sigma.substr(0,2) != "SM") { whch[0] = eigs_sigma[0]; whch[1] = eigs_sigma[1]; } } else { eigen_assert(false && "Specifying clustered eigenvalues is not yet supported!"); // If it's not scalar values, then the user may be explicitly // specifying the sigma value to cluster the evs around // sigma = atof(eigs_sigma.c_str()); // If atof fails, it returns 0.0, which is a fine default // } // "I" means normal eigenvalue problem, "G" means generalized // char bmat[2] = "I"; if (eigs_sigma.substr(0,2) == "SM" || !(isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1])) || (!isBempty && !BisSPD)) bmat[0] = 'G'; // Now we determine the mode to use // int mode = (bmat[0] == 'G') + 1; if (eigs_sigma.substr(0,2) == "SM" || !(isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1]))) { // We're going to use shift-and-invert mode, and basically find // the largest eigenvalues of the inverse operator // mode = 3; } // The 
user-specified number of eigenvalues/vectors to compute // int nev = (int)nbrEigenvalues; // Allocate space for ARPACK to store the residual // Scalar *resid = new Scalar[n]; // Number of Lanczos vectors, must satisfy nev < ncv <= n // Note that this indicates that nev != n, and we cannot compute // all eigenvalues of a mtrix // int ncv = std::min(std::max(2*nev, 20), n); // The working n x ncv matrix, also store the final eigenvectors (if computed) // Scalar *v = new Scalar[n*ncv]; int ldv = n; // Working space // Scalar *workd = new Scalar[3*n]; int lworkl = ncv*ncv+8*ncv; // Must be at least this length Scalar *workl = new Scalar[lworkl]; int *iparam= new int[11]; iparam[0] = 1; // 1 means we let ARPACK perform the shifts, 0 means we'd have to do it iparam[2] = std::max(300, (int)std::ceil(2*n/std::max(ncv,1))); iparam[6] = mode; // The mode, 1 is standard ev problem, 2 for generalized ev, 3 for shift-and-invert // Used during reverse communicate to notify where arrays start // int *ipntr = new int[11]; // Error codes are returned in here, initial value of 0 indicates a random initial // residual vector is used, any other values means resid contains the initial residual // vector, possibly from a previous run // int info = 0; Scalar scale = 1.0; //if (!isBempty) //{ //Scalar scale = B.norm() / std::sqrt(n); //scale = std::pow(2, std::floor(std::log(scale+1))); ////M /= scale; //for (size_t i=0; i<(size_t)B.outerSize(); i++) // for (typename MatrixType::InnerIterator it(B, i); it; ++it) // it.valueRef() /= scale; //} MatrixSolver OP; if (mode == 1 || mode == 2) { if (!isBempty) OP.compute(B); } else if (mode == 3) { if (sigma == 0.0) { OP.compute(A); } else { // Note: We will never enter here because sigma must be 0.0 // if (isBempty) { MatrixType AminusSigmaB(A); for (Index i=0; i::saupd(&ido, bmat, &n, whch, &nev, &tol, resid, &ncv, v, &ldv, iparam, ipntr, workd, workl, &lworkl, &info); if (ido == -1 || ido == 1) { Scalar *in = workd + ipntr[0] - 1; Scalar *out = workd + ipntr[1] - 1; if (ido == 1 && mode != 2) { Scalar *out2 = workd + ipntr[2] - 1; if (isBempty || mode == 1) Matrix::Map(out2, n) = Matrix::Map(in, n); else Matrix::Map(out2, n) = B * Matrix::Map(in, n); in = workd + ipntr[2] - 1; } if (mode == 1) { if (isBempty) { // OP = A // Matrix::Map(out, n) = A * Matrix::Map(in, n); } else { // OP = L^{-1}AL^{-T} // internal::OP::applyOP(OP, A, n, in, out); } } else if (mode == 2) { if (ido == 1) Matrix::Map(in, n) = A * Matrix::Map(in, n); // OP = B^{-1} A // Matrix::Map(out, n) = OP.solve(Matrix::Map(in, n)); } else if (mode == 3) { // OP = (A-\sigmaB)B (\sigma could be 0, and B could be I) // The B * in is already computed and stored at in if ido == 1 // if (ido == 1 || isBempty) Matrix::Map(out, n) = OP.solve(Matrix::Map(in, n)); else Matrix::Map(out, n) = OP.solve(B * Matrix::Map(in, n)); } } else if (ido == 2) { Scalar *in = workd + ipntr[0] - 1; Scalar *out = workd + ipntr[1] - 1; if (isBempty || mode == 1) Matrix::Map(out, n) = Matrix::Map(in, n); else Matrix::Map(out, n) = B * Matrix::Map(in, n); } } while (ido != 99); if (info == 1) m_info = NoConvergence; else if (info == 3) m_info = NumericalIssue; else if (info < 0) m_info = InvalidInput; else if (info != 0) eigen_assert(false && "Unknown ARPACK return value!"); else { // Do we compute eigenvectors or not? 
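    // Once the reverse-communication loop has finished successfully,
    // arpack_wrapper::seupd (i.e. dseupd_/sseupd_) is called below to extract
    // the converged eigenvalues and, if requested, the Ritz vectors; for the
    // generalized problem with a Cholesky-factored B the vectors are then
    // mapped back to the original basis via internal::OP::project.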
// int rvec = (options & ComputeEigenvectors) == ComputeEigenvectors; // "A" means "All", use "S" to choose specific eigenvalues (not yet supported in ARPACK)) // char howmny[2] = "A"; // if howmny == "S", specifies the eigenvalues to compute (not implemented in ARPACK) // int *select = new int[ncv]; // Final eigenvalues // m_eivalues.resize(nev, 1); internal::arpack_wrapper::seupd(&rvec, howmny, select, m_eivalues.data(), v, &ldv, &sigma, bmat, &n, whch, &nev, &tol, resid, &ncv, v, &ldv, iparam, ipntr, workd, workl, &lworkl, &info); if (info == -14) m_info = NoConvergence; else if (info != 0) m_info = InvalidInput; else { if (rvec) { m_eivec.resize(A.rows(), nev); for (int i=0; i::project(OP, n, nev, m_eivec.data()); m_eigenvectorsOk = true; } m_nbrIterations = iparam[2]; m_nbrConverged = iparam[4]; m_info = Success; } delete[] select; } delete[] v; delete[] iparam; delete[] ipntr; delete[] workd; delete[] workl; delete[] resid; m_isInitialized = true; return *this; } // Single precision // extern "C" void ssaupd_(int *ido, char *bmat, int *n, char *which, int *nev, float *tol, float *resid, int *ncv, float *v, int *ldv, int *iparam, int *ipntr, float *workd, float *workl, int *lworkl, int *info); extern "C" void sseupd_(int *rvec, char *All, int *select, float *d, float *z, int *ldz, float *sigma, char *bmat, int *n, char *which, int *nev, float *tol, float *resid, int *ncv, float *v, int *ldv, int *iparam, int *ipntr, float *workd, float *workl, int *lworkl, int *ierr); // Double precision // extern "C" void dsaupd_(int *ido, char *bmat, int *n, char *which, int *nev, double *tol, double *resid, int *ncv, double *v, int *ldv, int *iparam, int *ipntr, double *workd, double *workl, int *lworkl, int *info); extern "C" void dseupd_(int *rvec, char *All, int *select, double *d, double *z, int *ldz, double *sigma, char *bmat, int *n, char *which, int *nev, double *tol, double *resid, int *ncv, double *v, int *ldv, int *iparam, int *ipntr, double *workd, double *workl, int *lworkl, int *ierr); namespace internal { template struct arpack_wrapper { static inline void saupd(int *ido, char *bmat, int *n, char *which, int *nev, RealScalar *tol, Scalar *resid, int *ncv, Scalar *v, int *ldv, int *iparam, int *ipntr, Scalar *workd, Scalar *workl, int *lworkl, int *info) { EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) } static inline void seupd(int *rvec, char *All, int *select, Scalar *d, Scalar *z, int *ldz, RealScalar *sigma, char *bmat, int *n, char *which, int *nev, RealScalar *tol, Scalar *resid, int *ncv, Scalar *v, int *ldv, int *iparam, int *ipntr, Scalar *workd, Scalar *workl, int *lworkl, int *ierr) { EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) } }; template <> struct arpack_wrapper { static inline void saupd(int *ido, char *bmat, int *n, char *which, int *nev, float *tol, float *resid, int *ncv, float *v, int *ldv, int *iparam, int *ipntr, float *workd, float *workl, int *lworkl, int *info) { ssaupd_(ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info); } static inline void seupd(int *rvec, char *All, int *select, float *d, float *z, int *ldz, float *sigma, char *bmat, int *n, char *which, int *nev, float *tol, float *resid, int *ncv, float *v, int *ldv, int *iparam, int *ipntr, float *workd, float *workl, int *lworkl, int *ierr) { sseupd_(rvec, All, select, d, z, ldz, sigma, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, ierr); } }; template <> struct 
arpack_wrapper { static inline void saupd(int *ido, char *bmat, int *n, char *which, int *nev, double *tol, double *resid, int *ncv, double *v, int *ldv, int *iparam, int *ipntr, double *workd, double *workl, int *lworkl, int *info) { dsaupd_(ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info); } static inline void seupd(int *rvec, char *All, int *select, double *d, double *z, int *ldz, double *sigma, char *bmat, int *n, char *which, int *nev, double *tol, double *resid, int *ncv, double *v, int *ldv, int *iparam, int *ipntr, double *workd, double *workl, int *lworkl, int *ierr) { dseupd_(rvec, All, select, d, v, ldv, sigma, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, ierr); } }; template struct OP { static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out); static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs); }; template struct OP { static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out) { // OP = L^{-1} A L^{-T} (B = LL^T) // // First solve L^T out = in // Matrix::Map(out, n) = OP.matrixU().solve(Matrix::Map(in, n)); Matrix::Map(out, n) = OP.permutationPinv() * Matrix::Map(out, n); // Then compute out = A out // Matrix::Map(out, n) = A * Matrix::Map(out, n); // Then solve L out = out // Matrix::Map(out, n) = OP.permutationP() * Matrix::Map(out, n); Matrix::Map(out, n) = OP.matrixL().solve(Matrix::Map(out, n)); } static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs) { // Solve L^T out = in // Matrix::Map(vecs, n, k) = OP.matrixU().solve(Matrix::Map(vecs, n, k)); Matrix::Map(vecs, n, k) = OP.permutationPinv() * Matrix::Map(vecs, n, k); } }; template struct OP { static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out) { eigen_assert(false && "Should never be in here..."); } static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs) { eigen_assert(false && "Should never be in here..."); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_ARPACKSELFADJOINTEIGENSOLVER_H RcppEigen/inst/include/unsupported/Eigen/src/NumericalDiff/0000755000176200001440000000000012253717461023454 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h0000644000176200001440000000766412253717461026352 0ustar liggesusers// -*- coding: utf-8 // vim: set fileencoding=utf-8 // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NUMERICAL_DIFF_H #define EIGEN_NUMERICAL_DIFF_H namespace Eigen { enum NumericalDiffMode { Forward, Central }; /** * This class allows you to add a method df() to your functor, which will * use numerical differentiation to compute an approximate of the * derivative for the functor. Of course, if you have an analytical form * for the derivative, you should rather implement df() by yourself. * * More information on * http://en.wikipedia.org/wiki/Numerical_differentiation * * Currently only "Forward" and "Central" scheme are implemented. 
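 *
 * A brief usage sketch (not from the original documentation; the functor and
 * values are made up for illustration):
 * \code
 * #include <unsupported/Eigen/NumericalDiff>
 * struct MyFunctor
 * {
 *   typedef double Scalar;
 *   typedef Eigen::VectorXd InputType;
 *   typedef Eigen::VectorXd ValueType;
 *   typedef Eigen::MatrixXd JacobianType;
 *   enum { InputsAtCompileTime = Eigen::Dynamic, ValuesAtCompileTime = Eigen::Dynamic };
 *   int inputs() const { return 2; }
 *   int values() const { return 2; }
 *   int operator()(const InputType& x, ValueType& f) const
 *   {
 *     f(0) = x(0) * x(0) + x(1);   // f1(x) = x0^2 + x1
 *     f(1) = std::sin(x(1));       // f2(x) = sin(x1)
 *     return 0;
 *   }
 * };
 *
 * Eigen::NumericalDiff<MyFunctor> numDiff;   // forward differences by default
 * Eigen::VectorXd x(2); x << 1.0, 2.0;
 * Eigen::MatrixXd jac(2, 2);
 * numDiff.df(x, jac);                        // fills jac with the approximate Jacobian at x
 * \endcode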
*/ template class NumericalDiff : public _Functor { public: typedef _Functor Functor; typedef typename Functor::Scalar Scalar; typedef typename Functor::InputType InputType; typedef typename Functor::ValueType ValueType; typedef typename Functor::JacobianType JacobianType; NumericalDiff(Scalar _epsfcn=0.) : Functor(), epsfcn(_epsfcn) {} NumericalDiff(const Functor& f, Scalar _epsfcn=0.) : Functor(f), epsfcn(_epsfcn) {} // forward constructors template NumericalDiff(const T0& a0) : Functor(a0), epsfcn(0) {} template NumericalDiff(const T0& a0, const T1& a1) : Functor(a0, a1), epsfcn(0) {} template NumericalDiff(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2), epsfcn(0) {} enum { InputsAtCompileTime = Functor::InputsAtCompileTime, ValuesAtCompileTime = Functor::ValuesAtCompileTime }; /** * return the number of evaluation of functor */ int df(const InputType& _x, JacobianType &jac) const { using std::sqrt; using std::abs; /* Local variables */ Scalar h; int nfev=0; const typename InputType::Index n = _x.size(); const Scalar eps = sqrt(((std::max)(epsfcn,NumTraits::epsilon() ))); ValueType val1, val2; InputType x = _x; // TODO : we should do this only if the size is not already known val1.resize(Functor::values()); val2.resize(Functor::values()); // initialization switch(mode) { case Forward: // compute f(x) Functor::operator()(x, val1); nfev++; break; case Central: // do nothing break; default: eigen_assert(false); }; // Function Body for (int j = 0; j < n; ++j) { h = eps * abs(x[j]); if (h == 0.) { h = eps; } switch(mode) { case Forward: x[j] += h; Functor::operator()(x, val2); nfev++; x[j] = _x[j]; jac.col(j) = (val2-val1)/h; break; case Central: x[j] += h; Functor::operator()(x, val2); nfev++; x[j] -= 2*h; Functor::operator()(x, val1); nfev++; x[j] = _x[j]; jac.col(j) = (val2-val1)/(2*h); break; default: eigen_assert(false); }; } return nfev; } private: Scalar epsfcn; NumericalDiff& operator=(const NumericalDiff&); }; } // end namespace Eigen //vim: ai ts=4 sts=4 et sw=4 #endif // EIGEN_NUMERICAL_DIFF_H RcppEigen/inst/include/unsupported/Eigen/src/KroneckerProduct/0000755000176200001440000000000013100733213024211 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h0000644000176200001440000002376613100733213031057 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Kolja Brix // Copyright (C) 2011 Andreas Platen // Copyright (C) 2012 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef KRONECKER_TENSOR_PRODUCT_H #define KRONECKER_TENSOR_PRODUCT_H namespace Eigen { /*! * \ingroup KroneckerProduct_Module * * \brief The base class of dense and sparse Kronecker product. * * \tparam Derived is the derived type. */ template class KroneckerProductBase : public ReturnByValue { private: typedef typename internal::traits Traits; typedef typename Traits::Scalar Scalar; protected: typedef typename Traits::Lhs Lhs; typedef typename Traits::Rhs Rhs; public: /*! \brief Constructor. */ KroneckerProductBase(const Lhs& A, const Rhs& B) : m_A(A), m_B(B) {} inline Index rows() const { return m_A.rows() * m_B.rows(); } inline Index cols() const { return m_A.cols() * m_B.cols(); } /*! * This overrides ReturnByValue::coeff because this function is * efficient enough. 
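     *
     * For \f$ C = A \otimes B \f$ with \f$ B \f$ of size \f$ B_r \times B_c \f$,
     * the returned entry is
     * \f$ C(i,j) = A(\lfloor i/B_r \rfloor, \lfloor j/B_c \rfloor)\, B(i \bmod B_r, j \bmod B_c) \f$,
     * so individual coefficients are available without materializing the product.
     * A small sketch (illustrative values, not from the original documentation):
     * \code
     * #include <unsupported/Eigen/KroneckerProduct>
     * Eigen::Matrix2d A;  A << 1, 2, 3, 4;
     * Eigen::Matrix2d B;  B << 0, 1, 1, 0;
     * Eigen::MatrixXd C = Eigen::kroneckerProduct(A, B);   // 4 x 4 dense result
     * double c12 = C(1, 2);                                // equals A(0,1) * B(1,0) == 2
     * \endcode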
*/ Scalar coeff(Index row, Index col) const { return m_A.coeff(row / m_B.rows(), col / m_B.cols()) * m_B.coeff(row % m_B.rows(), col % m_B.cols()); } /*! * This overrides ReturnByValue::coeff because this function is * efficient enough. */ Scalar coeff(Index i) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); return m_A.coeff(i / m_A.size()) * m_B.coeff(i % m_A.size()); } protected: typename Lhs::Nested m_A; typename Rhs::Nested m_B; }; /*! * \ingroup KroneckerProduct_Module * * \brief Kronecker tensor product helper class for dense matrices * * This class is the return value of kroneckerProduct(MatrixBase, * MatrixBase). Use the function rather than construct this class * directly to avoid specifying template prarameters. * * \tparam Lhs Type of the left-hand side, a matrix expression. * \tparam Rhs Type of the rignt-hand side, a matrix expression. */ template class KroneckerProduct : public KroneckerProductBase > { private: typedef KroneckerProductBase Base; using Base::m_A; using Base::m_B; public: /*! \brief Constructor. */ KroneckerProduct(const Lhs& A, const Rhs& B) : Base(A, B) {} /*! \brief Evaluate the Kronecker tensor product. */ template void evalTo(Dest& dst) const; }; /*! * \ingroup KroneckerProduct_Module * * \brief Kronecker tensor product helper class for sparse matrices * * If at least one of the operands is a sparse matrix expression, * then this class is returned and evaluates into a sparse matrix. * * This class is the return value of kroneckerProduct(EigenBase, * EigenBase). Use the function rather than construct this class * directly to avoid specifying template prarameters. * * \tparam Lhs Type of the left-hand side, a matrix expression. * \tparam Rhs Type of the rignt-hand side, a matrix expression. */ template class KroneckerProductSparse : public KroneckerProductBase > { private: typedef KroneckerProductBase Base; using Base::m_A; using Base::m_B; public: /*! \brief Constructor. */ KroneckerProductSparse(const Lhs& A, const Rhs& B) : Base(A, B) {} /*! \brief Evaluate the Kronecker tensor product. */ template void evalTo(Dest& dst) const; }; template template void KroneckerProduct::evalTo(Dest& dst) const { const int BlockRows = Rhs::RowsAtCompileTime, BlockCols = Rhs::ColsAtCompileTime; const Index Br = m_B.rows(), Bc = m_B.cols(); for (Index i=0; i < m_A.rows(); ++i) for (Index j=0; j < m_A.cols(); ++j) Block(dst,i*Br,j*Bc,Br,Bc) = m_A.coeff(i,j) * m_B; } template template void KroneckerProductSparse::evalTo(Dest& dst) const { Index Br = m_B.rows(), Bc = m_B.cols(); dst.resize(this->rows(), this->cols()); dst.resizeNonZeros(0); // 1 - evaluate the operands if needed: typedef typename internal::nested_eval::type Lhs1; typedef typename internal::remove_all::type Lhs1Cleaned; const Lhs1 lhs1(m_A); typedef typename internal::nested_eval::type Rhs1; typedef typename internal::remove_all::type Rhs1Cleaned; const Rhs1 rhs1(m_B); // 2 - construct respective iterators typedef Eigen::InnerIterator LhsInnerIterator; typedef Eigen::InnerIterator RhsInnerIterator; // compute number of non-zeros per innervectors of dst { // TODO VectorXi is not necessarily big enough! VectorXi nnzA = VectorXi::Zero(Dest::IsRowMajor ? m_A.rows() : m_A.cols()); for (Index kA=0; kA < m_A.outerSize(); ++kA) for (LhsInnerIterator itA(lhs1,kA); itA; ++itA) nnzA(Dest::IsRowMajor ? itA.row() : itA.col())++; VectorXi nnzB = VectorXi::Zero(Dest::IsRowMajor ? m_B.rows() : m_B.cols()); for (Index kB=0; kB < m_B.outerSize(); ++kB) for (RhsInnerIterator itB(rhs1,kB); itB; ++itB) nnzB(Dest::IsRowMajor ? 
itB.row() : itB.col())++; Matrix nnzAB = nnzB * nnzA.transpose(); dst.reserve(VectorXi::Map(nnzAB.data(), nnzAB.size())); } for (Index kA=0; kA < m_A.outerSize(); ++kA) { for (Index kB=0; kB < m_B.outerSize(); ++kB) { for (LhsInnerIterator itA(lhs1,kA); itA; ++itA) { for (RhsInnerIterator itB(rhs1,kB); itB; ++itB) { Index i = itA.row() * Br + itB.row(), j = itA.col() * Bc + itB.col(); dst.insert(i,j) = itA.value() * itB.value(); } } } } } namespace internal { template struct traits > { typedef typename remove_all<_Lhs>::type Lhs; typedef typename remove_all<_Rhs>::type Rhs; typedef typename ScalarBinaryOpTraits::ReturnType Scalar; typedef typename promote_index_type::type StorageIndex; enum { Rows = size_at_compile_time::RowsAtCompileTime, traits::RowsAtCompileTime>::ret, Cols = size_at_compile_time::ColsAtCompileTime, traits::ColsAtCompileTime>::ret, MaxRows = size_at_compile_time::MaxRowsAtCompileTime, traits::MaxRowsAtCompileTime>::ret, MaxCols = size_at_compile_time::MaxColsAtCompileTime, traits::MaxColsAtCompileTime>::ret }; typedef Matrix ReturnType; }; template struct traits > { typedef MatrixXpr XprKind; typedef typename remove_all<_Lhs>::type Lhs; typedef typename remove_all<_Rhs>::type Rhs; typedef typename ScalarBinaryOpTraits::ReturnType Scalar; typedef typename cwise_promote_storage_type::StorageKind, typename traits::StorageKind, scalar_product_op >::ret StorageKind; typedef typename promote_index_type::type StorageIndex; enum { LhsFlags = Lhs::Flags, RhsFlags = Rhs::Flags, RowsAtCompileTime = size_at_compile_time::RowsAtCompileTime, traits::RowsAtCompileTime>::ret, ColsAtCompileTime = size_at_compile_time::ColsAtCompileTime, traits::ColsAtCompileTime>::ret, MaxRowsAtCompileTime = size_at_compile_time::MaxRowsAtCompileTime, traits::MaxRowsAtCompileTime>::ret, MaxColsAtCompileTime = size_at_compile_time::MaxColsAtCompileTime, traits::MaxColsAtCompileTime>::ret, EvalToRowMajor = (LhsFlags & RhsFlags & RowMajorBit), RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit), Flags = ((LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) | EvalBeforeNestingBit, CoeffReadCost = HugeCost }; typedef SparseMatrix ReturnType; }; } // end namespace internal /*! * \ingroup KroneckerProduct_Module * * Computes Kronecker tensor product of two dense matrices * * \warning If you want to replace a matrix by its Kronecker product * with some matrix, do \b NOT do this: * \code * A = kroneckerProduct(A,B); // bug!!! caused by aliasing effect * \endcode * instead, use eval() to work around this: * \code * A = kroneckerProduct(A,B).eval(); * \endcode * * \param a Dense matrix a * \param b Dense matrix b * \return Kronecker tensor product of a and b */ template KroneckerProduct kroneckerProduct(const MatrixBase& a, const MatrixBase& b) { return KroneckerProduct(a.derived(), b.derived()); } /*! * \ingroup KroneckerProduct_Module * * Computes Kronecker tensor product of two matrices, at least one of * which is sparse * * \warning If you want to replace a matrix by its Kronecker product * with some matrix, do \b NOT do this: * \code * A = kroneckerProduct(A,B); // bug!!! 
caused by aliasing effect * \endcode * instead, use eval() to work around this: * \code * A = kroneckerProduct(A,B).eval(); * \endcode * * \param a Dense/sparse matrix a * \param b Dense/sparse matrix b * \return Kronecker tensor product of a and b, stored in a sparse * matrix */ template KroneckerProductSparse kroneckerProduct(const EigenBase& a, const EigenBase& b) { return KroneckerProductSparse(a.derived(), b.derived()); } } // end namespace Eigen #endif // KRONECKER_TENSOR_PRODUCT_H RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/0000755000176200001440000000000013100733213024517 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h0000644000176200001440000003175413100733213030474 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // Copyright (C) 2012 Desire Nuentsa // // The algorithm of this class initially comes from MINPACK whose original authors are: // Copyright Jorge More - Argonne National Laboratory // Copyright Burt Garbow - Argonne National Laboratory // Copyright Ken Hillstrom - Argonne National Laboratory // // This Source Code Form is subject to the terms of the Minpack license // (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LEVENBERGMARQUARDT_H #define EIGEN_LEVENBERGMARQUARDT_H namespace Eigen { namespace LevenbergMarquardtSpace { enum Status { NotStarted = -2, Running = -1, ImproperInputParameters = 0, RelativeReductionTooSmall = 1, RelativeErrorTooSmall = 2, RelativeErrorAndReductionTooSmall = 3, CosinusTooSmall = 4, TooManyFunctionEvaluation = 5, FtolTooSmall = 6, XtolTooSmall = 7, GtolTooSmall = 8, UserAsked = 9 }; } template struct DenseFunctor { typedef _Scalar Scalar; enum { InputsAtCompileTime = NX, ValuesAtCompileTime = NY }; typedef Matrix InputType; typedef Matrix ValueType; typedef Matrix JacobianType; typedef ColPivHouseholderQR QRSolver; const int m_inputs, m_values; DenseFunctor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {} DenseFunctor(int inputs, int values) : m_inputs(inputs), m_values(values) {} int inputs() const { return m_inputs; } int values() const { return m_values; } //int operator()(const InputType &x, ValueType& fvec) { } // should be defined in derived classes //int df(const InputType &x, JacobianType& fjac) { } // should be defined in derived classes }; template struct SparseFunctor { typedef _Scalar Scalar; typedef _Index Index; typedef Matrix InputType; typedef Matrix ValueType; typedef SparseMatrix JacobianType; typedef SparseQR > QRSolver; enum { InputsAtCompileTime = Dynamic, ValuesAtCompileTime = Dynamic }; SparseFunctor(int inputs, int values) : m_inputs(inputs), m_values(values) {} int inputs() const { return m_inputs; } int values() const { return m_values; } const int m_inputs, m_values; //int operator()(const InputType &x, ValueType& fvec) { } // to be defined in the functor //int df(const InputType &x, JacobianType& fjac) { } // to be defined in the functor if no automatic differentiation }; namespace internal { template void lmpar2(const QRSolver &qr, const VectorType &diag, const VectorType &qtb, typename VectorType::Scalar m_delta, typename VectorType::Scalar &par, VectorType &x); } /** * \ingroup 
NonLinearOptimization_Module * \brief Performs non linear optimization over a non-linear function, * using a variant of the Levenberg Marquardt algorithm. * * Check wikipedia for more information. * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm */ template class LevenbergMarquardt : internal::no_assignment_operator { public: typedef _FunctorType FunctorType; typedef typename FunctorType::QRSolver QRSolver; typedef typename FunctorType::JacobianType JacobianType; typedef typename JacobianType::Scalar Scalar; typedef typename JacobianType::RealScalar RealScalar; typedef typename QRSolver::StorageIndex PermIndex; typedef Matrix FVectorType; typedef PermutationMatrix PermutationType; public: LevenbergMarquardt(FunctorType& functor) : m_functor(functor),m_nfev(0),m_njev(0),m_fnorm(0.0),m_gnorm(0), m_isInitialized(false),m_info(InvalidInput) { resetParameters(); m_useExternalScaling=false; } LevenbergMarquardtSpace::Status minimize(FVectorType &x); LevenbergMarquardtSpace::Status minimizeInit(FVectorType &x); LevenbergMarquardtSpace::Status minimizeOneStep(FVectorType &x); LevenbergMarquardtSpace::Status lmder1( FVectorType &x, const Scalar tol = std::sqrt(NumTraits::epsilon()) ); static LevenbergMarquardtSpace::Status lmdif1( FunctorType &functor, FVectorType &x, Index *nfev, const Scalar tol = std::sqrt(NumTraits::epsilon()) ); /** Sets the default parameters */ void resetParameters() { using std::sqrt; m_factor = 100.; m_maxfev = 400; m_ftol = sqrt(NumTraits::epsilon()); m_xtol = sqrt(NumTraits::epsilon()); m_gtol = 0. ; m_epsfcn = 0. ; } /** Sets the tolerance for the norm of the solution vector*/ void setXtol(RealScalar xtol) { m_xtol = xtol; } /** Sets the tolerance for the norm of the vector function*/ void setFtol(RealScalar ftol) { m_ftol = ftol; } /** Sets the tolerance for the norm of the gradient of the error vector*/ void setGtol(RealScalar gtol) { m_gtol = gtol; } /** Sets the step bound for the diagonal shift */ void setFactor(RealScalar factor) { m_factor = factor; } /** Sets the error precision */ void setEpsilon (RealScalar epsfcn) { m_epsfcn = epsfcn; } /** Sets the maximum number of function evaluation */ void setMaxfev(Index maxfev) {m_maxfev = maxfev; } /** Use an external Scaling. 
If set to true, pass a nonzero diagonal to diag() */ void setExternalScaling(bool value) {m_useExternalScaling = value; } /** \returns the tolerance for the norm of the solution vector */ RealScalar xtol() const {return m_xtol; } /** \returns the tolerance for the norm of the vector function */ RealScalar ftol() const {return m_ftol; } /** \returns the tolerance for the norm of the gradient of the error vector */ RealScalar gtol() const {return m_gtol; } /** \returns the step bound for the diagonal shift */ RealScalar factor() const {return m_factor; } /** \returns the error precision */ RealScalar epsilon() const {return m_epsfcn; } /** \returns the maximum number of function evaluation */ Index maxfev() const {return m_maxfev; } /** \returns a reference to the diagonal of the jacobian */ FVectorType& diag() {return m_diag; } /** \returns the number of iterations performed */ Index iterations() { return m_iter; } /** \returns the number of functions evaluation */ Index nfev() { return m_nfev; } /** \returns the number of jacobian evaluation */ Index njev() { return m_njev; } /** \returns the norm of current vector function */ RealScalar fnorm() {return m_fnorm; } /** \returns the norm of the gradient of the error */ RealScalar gnorm() {return m_gnorm; } /** \returns the LevenbergMarquardt parameter */ RealScalar lm_param(void) { return m_par; } /** \returns a reference to the current vector function */ FVectorType& fvec() {return m_fvec; } /** \returns a reference to the matrix where the current Jacobian matrix is stored */ JacobianType& jacobian() {return m_fjac; } /** \returns a reference to the triangular matrix R from the QR of the jacobian matrix. * \sa jacobian() */ JacobianType& matrixR() {return m_rfactor; } /** the permutation used in the QR factorization */ PermutationType permutation() {return m_permutation; } /** * \brief Reports whether the minimization was successful * \returns \c Success if the minimization was succesful, * \c NumericalIssue if a numerical problem arises during the * minimization process, for exemple during the QR factorization * \c NoConvergence if the minimization did not converge after * the maximum number of function evaluation allowed * \c InvalidInput if the input matrix is invalid */ ComputationInfo info() const { return m_info; } private: JacobianType m_fjac; JacobianType m_rfactor; // The triangular matrix R from the QR of the jacobian matrix m_fjac FunctorType &m_functor; FVectorType m_fvec, m_qtf, m_diag; Index n; Index m; Index m_nfev; Index m_njev; RealScalar m_fnorm; // Norm of the current vector function RealScalar m_gnorm; //Norm of the gradient of the error RealScalar m_factor; // Index m_maxfev; // Maximum number of function evaluation RealScalar m_ftol; //Tolerance in the norm of the vector function RealScalar m_xtol; // RealScalar m_gtol; //tolerance of the norm of the error gradient RealScalar m_epsfcn; // Index m_iter; // Number of iterations performed RealScalar m_delta; bool m_useExternalScaling; PermutationType m_permutation; FVectorType m_wa1, m_wa2, m_wa3, m_wa4; //Temporary vectors RealScalar m_par; bool m_isInitialized; // Check whether the minimization step has been called ComputationInfo m_info; }; template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimize(FVectorType &x) { LevenbergMarquardtSpace::Status status = minimizeInit(x); if (status==LevenbergMarquardtSpace::ImproperInputParameters) { m_isInitialized = true; return status; } do { // std::cout << " uv " << x.transpose() << "\n"; status = minimizeOneStep(x); 
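// The loop condition below keeps stepping while minimizeOneStep() reports Running;
// any other status (a convergence code, TooManyFunctionEvaluation, or UserAsked
// raised by the functor) terminates the minimization.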
} while (status==LevenbergMarquardtSpace::Running); m_isInitialized = true; return status; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeInit(FVectorType &x) { n = x.size(); m = m_functor.values(); m_wa1.resize(n); m_wa2.resize(n); m_wa3.resize(n); m_wa4.resize(m); m_fvec.resize(m); //FIXME Sparse Case : Allocate space for the jacobian m_fjac.resize(m, n); // m_fjac.reserve(VectorXi::Constant(n,5)); // FIXME Find a better alternative if (!m_useExternalScaling) m_diag.resize(n); eigen_assert( (!m_useExternalScaling || m_diag.size()==n) && "When m_useExternalScaling is set, the caller must provide a valid 'm_diag'"); m_qtf.resize(n); /* Function Body */ m_nfev = 0; m_njev = 0; /* check the input parameters for errors. */ if (n <= 0 || m < n || m_ftol < 0. || m_xtol < 0. || m_gtol < 0. || m_maxfev <= 0 || m_factor <= 0.){ m_info = InvalidInput; return LevenbergMarquardtSpace::ImproperInputParameters; } if (m_useExternalScaling) for (Index j = 0; j < n; ++j) if (m_diag[j] <= 0.) { m_info = InvalidInput; return LevenbergMarquardtSpace::ImproperInputParameters; } /* evaluate the function at the starting point */ /* and calculate its norm. */ m_nfev = 1; if ( m_functor(x, m_fvec) < 0) return LevenbergMarquardtSpace::UserAsked; m_fnorm = m_fvec.stableNorm(); /* initialize levenberg-marquardt parameter and iteration counter. */ m_par = 0.; m_iter = 1; return LevenbergMarquardtSpace::NotStarted; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::lmder1( FVectorType &x, const Scalar tol ) { n = x.size(); m = m_functor.values(); /* check the input parameters for errors. */ if (n <= 0 || m < n || tol < 0.) return LevenbergMarquardtSpace::ImproperInputParameters; resetParameters(); m_ftol = tol; m_xtol = tol; m_maxfev = 100*(n+1); return minimize(x); } template LevenbergMarquardtSpace::Status LevenbergMarquardt::lmdif1( FunctorType &functor, FVectorType &x, Index *nfev, const Scalar tol ) { Index n = x.size(); Index m = functor.values(); /* check the input parameters for errors. */ if (n <= 0 || m < n || tol < 0.) return LevenbergMarquardtSpace::ImproperInputParameters; NumericalDiff numDiff(functor); // embedded LevenbergMarquardt LevenbergMarquardt > lm(numDiff); lm.setFtol(tol); lm.setXtol(tol); lm.setMaxfev(200*(n+1)); LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x)); if (nfev) * nfev = lm.nfev(); return info; } } // end namespace Eigen #endif // EIGEN_LEVENBERGMARQUARDT_H RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h0000644000176200001440000001522413100733213026453 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // Copyright (C) 2012 Desire Nuentsa // // This code initially comes from MINPACK whose original authors are: // Copyright Jorge More - Argonne National Laboratory // Copyright Burt Garbow - Argonne National Laboratory // Copyright Ken Hillstrom - Argonne National Laboratory // // This Source Code Form is subject to the terms of the Minpack license // (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. 
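// lmqrsolv below follows MINPACK's qrsolv routine: given the upper triangular factor R
// of a column-pivoted QR factorization A*P = Q*R, a diagonal matrix D and the vector
// Q^T*b, it determines an x that solves, in the least-squares sense, the augmented system
//     A*x = b,   D*x = 0,
// by eliminating the rows of D with Givens rotations; on exit, sdiag holds the diagonal
// of the rotated triangular factor (a rough summary -- see the code and lmpar2() for the
// exact conventions).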
#ifndef EIGEN_LMQRSOLV_H #define EIGEN_LMQRSOLV_H namespace Eigen { namespace internal { template void lmqrsolv( Matrix &s, const PermutationMatrix &iPerm, const Matrix &diag, const Matrix &qtb, Matrix &x, Matrix &sdiag) { /* Local variables */ Index i, j, k; Scalar temp; Index n = s.cols(); Matrix wa(n); JacobiRotation givens; /* Function Body */ // the following will only change the lower triangular part of s, including // the diagonal, though the diagonal is restored afterward /* copy r and (q transpose)*b to preserve input and initialize s. */ /* in particular, save the diagonal elements of r in x. */ x = s.diagonal(); wa = qtb; s.topLeftCorner(n,n).template triangularView() = s.topLeftCorner(n,n).transpose(); /* eliminate the diagonal matrix d using a givens rotation. */ for (j = 0; j < n; ++j) { /* prepare the row of d to be eliminated, locating the */ /* diagonal element using p from the qr factorization. */ const PermIndex l = iPerm.indices()(j); if (diag[l] == 0.) break; sdiag.tail(n-j).setZero(); sdiag[j] = diag[l]; /* the transformations to eliminate the row of d */ /* modify only a single element of (q transpose)*b */ /* beyond the first n, which is initially zero. */ Scalar qtbpj = 0.; for (k = j; k < n; ++k) { /* determine a givens rotation which eliminates the */ /* appropriate element in the current row of d. */ givens.makeGivens(-s(k,k), sdiag[k]); /* compute the modified diagonal element of r and */ /* the modified element of ((q transpose)*b,0). */ s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k]; temp = givens.c() * wa[k] + givens.s() * qtbpj; qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj; wa[k] = temp; /* accumulate the tranformation in the row of s. */ for (i = k+1; i().solveInPlace(wa.head(nsing)); // restore sdiag = s.diagonal(); s.diagonal() = x; /* permute the components of z back to components of x. */ x = iPerm * wa; } template void lmqrsolv( SparseMatrix &s, const PermutationMatrix &iPerm, const Matrix &diag, const Matrix &qtb, Matrix &x, Matrix &sdiag) { /* Local variables */ typedef SparseMatrix FactorType; Index i, j, k, l; Scalar temp; Index n = s.cols(); Matrix wa(n); JacobiRotation givens; /* Function Body */ // the following will only change the lower triangular part of s, including // the diagonal, though the diagonal is restored afterward /* copy r and (q transpose)*b to preserve input and initialize R. */ wa = qtb; FactorType R(s); // Eliminate the diagonal matrix d using a givens rotation for (j = 0; j < n; ++j) { // Prepare the row of d to be eliminated, locating the // diagonal element using p from the qr factorization l = iPerm.indices()(j); if (diag(l) == Scalar(0)) break; sdiag.tail(n-j).setZero(); sdiag[j] = diag[l]; // the transformations to eliminate the row of d // modify only a single element of (q transpose)*b // beyond the first n, which is initially zero. Scalar qtbpj = 0; // Browse the nonzero elements of row j of the upper triangular s for (k = j; k < n; ++k) { typename FactorType::InnerIterator itk(R,k); for (; itk; ++itk){ if (itk.index() < k) continue; else break; } //At this point, we have the diagonal element R(k,k) // Determine a givens rotation which eliminates // the appropriate element in the current row of d givens.makeGivens(-itk.value(), sdiag(k)); // Compute the modified diagonal element of r and // the modified element of ((q transpose)*b,0). 
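// In scalar form (mirroring the statements just below, with old values on the right-hand
// sides): R(k,k) <- c*R(k,k) + s*sdiag(k),  wa(k) <- c*wa(k) + s*qtbpj,
// qtbpj <- -s*wa(k) + c*qtbpj.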
itk.valueRef() = givens.c() * itk.value() + givens.s() * sdiag(k); temp = givens.c() * wa(k) + givens.s() * qtbpj; qtbpj = -givens.s() * wa(k) + givens.c() * qtbpj; wa(k) = temp; // Accumulate the transformation in the remaining k row/column of R for (++itk; itk; ++itk) { i = itk.index(); temp = givens.c() * itk.value() + givens.s() * sdiag(i); sdiag(i) = -givens.s() * itk.value() + givens.c() * sdiag(i); itk.valueRef() = temp; } } } // Solve the triangular system for z. If the system is // singular, then obtain a least squares solution Index nsing; for(nsing = 0; nsing().solve/*InPlace*/(wa.head(nsing)); sdiag = R.diagonal(); // Permute the components of z back to components of x x = iPerm * wa; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_LMQRSOLV_H RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h0000644000176200001440000001165713100733213025715 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This code initially comes from MINPACK whose original authors are: // Copyright Jorge More - Argonne National Laboratory // Copyright Burt Garbow - Argonne National Laboratory // Copyright Ken Hillstrom - Argonne National Laboratory // // This Source Code Form is subject to the terms of the Minpack license // (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. #ifndef EIGEN_LMPAR_H #define EIGEN_LMPAR_H namespace Eigen { namespace internal { template void lmpar2( const QRSolver &qr, const VectorType &diag, const VectorType &qtb, typename VectorType::Scalar m_delta, typename VectorType::Scalar &par, VectorType &x) { using std::sqrt; using std::abs; typedef typename QRSolver::MatrixType MatrixType; typedef typename QRSolver::Scalar Scalar; // typedef typename QRSolver::StorageIndex StorageIndex; /* Local variables */ Index j; Scalar fp; Scalar parc, parl; Index iter; Scalar temp, paru; Scalar gnorm; Scalar dxnorm; // Make a copy of the triangular factor. // This copy is modified during call the qrsolv MatrixType s; s = qr.matrixR(); /* Function Body */ const Scalar dwarf = (std::numeric_limits::min)(); const Index n = qr.matrixR().cols(); eigen_assert(n==diag.size()); eigen_assert(n==qtb.size()); VectorType wa1, wa2; /* compute and store in x the gauss-newton direction. if the */ /* jacobian is rank-deficient, obtain a least squares solution. */ // const Index rank = qr.nonzeroPivots(); // exactly double(0.) const Index rank = qr.rank(); // use a threshold wa1 = qtb; wa1.tail(n-rank).setZero(); //FIXME There is no solve in place for sparse triangularView wa1.head(rank) = s.topLeftCorner(rank,rank).template triangularView().solve(qtb.head(rank)); x = qr.colsPermutation()*wa1; /* initialize the iteration counter. */ /* evaluate the function at the origin, and test */ /* for acceptance of the gauss-newton direction. */ iter = 0; wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); fp = dxnorm - m_delta; if (fp <= Scalar(0.1) * m_delta) { par = 0; return; } /* if the jacobian is not rank deficient, the newton */ /* step provides a lower bound, parl, for the zero of */ /* the function. otherwise set this bound to zero. */ parl = 0.; if (rank==n) { wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm; s.topLeftCorner(n,n).transpose().template triangularView().solveInPlace(wa1); temp = wa1.blueNorm(); parl = fp / m_delta / temp / temp; } /* calculate an upper bound, paru, for the zero of the function. 
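(in the loop below, paru is set to gnorm / m_delta, where gnorm is the norm of
R^T*(Q^T*b) with each component divided by the corresponding permuted entry of diag)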
*/ for (j = 0; j < n; ++j) wa1[j] = s.col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)]; gnorm = wa1.stableNorm(); paru = gnorm / m_delta; if (paru == 0.) paru = dwarf / (std::min)(m_delta,Scalar(0.1)); /* if the input par lies outside of the interval (parl,paru), */ /* set par to the closer endpoint. */ par = (std::max)(par,parl); par = (std::min)(par,paru); if (par == 0.) par = gnorm / dxnorm; /* beginning of an iteration. */ while (true) { ++iter; /* evaluate the function at the current value of par. */ if (par == 0.) par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */ wa1 = sqrt(par)* diag; VectorType sdiag(n); lmqrsolv(s, qr.colsPermutation(), wa1, qtb, x, sdiag); wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); temp = fp; fp = dxnorm - m_delta; /* if the function is small enough, accept the current value */ /* of par. also test for the exceptional cases where parl */ /* is zero or the number of iterations has reached 10. */ if (abs(fp) <= Scalar(0.1) * m_delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10) break; /* compute the newton correction. */ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm); // we could almost use this here, but the diagonal is outside qr, in sdiag[] for (j = 0; j < n; ++j) { wa1[j] /= sdiag[j]; temp = wa1[j]; for (Index i = j+1; i < n; ++i) wa1[i] -= s.coeff(i,j) * temp; } temp = wa1.blueNorm(); parc = fp / m_delta / temp / temp; /* depending on the sign of the function, update parl or paru. */ if (fp > 0.) parl = (std::max)(parl,par); if (fp < 0.) paru = (std::min)(paru,par); /* compute an improved estimate for par. */ par = (std::max)(parl,par+parc); } if (iter == 0) par = 0.; return; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_LMPAR_H RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt0000644000176200001440000000422212253717461030252 0ustar liggesusersMinpack Copyright Notice (1999) University of Chicago. All rights reserved Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The end-user documentation included with the redistribution, if any, must include the following acknowledgment: "This product includes software developed by the University of Chicago, as Operator of Argonne National Laboratory. Alternately, this acknowledgment may appear in the software itself, if and wherever such third-party acknowledgments normally appear. 4. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS" WITHOUT WARRANTY OF ANY KIND. 
THE COPYRIGHT HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4) DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL BE CORRECTED. 5. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT, INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE, EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE POSSIBILITY OF SUCH LOSS OR DAMAGES. RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h0000644000176200001440000000461313100733213026237 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This code initially comes from MINPACK whose original authors are: // Copyright Jorge More - Argonne National Laboratory // Copyright Burt Garbow - Argonne National Laboratory // Copyright Ken Hillstrom - Argonne National Laboratory // // This Source Code Form is subject to the terms of the Minpack license // (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. #ifndef EIGEN_LMCOVAR_H #define EIGEN_LMCOVAR_H namespace Eigen { namespace internal { template void covar( Matrix< Scalar, Dynamic, Dynamic > &r, const VectorXi& ipvt, Scalar tol = std::sqrt(NumTraits::epsilon()) ) { using std::abs; /* Local variables */ Index i, j, k, l, ii, jj; bool sing; Scalar temp; /* Function Body */ const Index n = r.cols(); const Scalar tolr = tol * abs(r(0,0)); Matrix< Scalar, Dynamic, 1 > wa(n); eigen_assert(ipvt.size()==n); /* form the inverse of r in the full upper triangle of r. */ l = -1; for (k = 0; k < n; ++k) if (abs(r(k,k)) > tolr) { r(k,k) = 1. / r(k,k); for (j = 0; j <= k-1; ++j) { temp = r(k,k) * r(j,k); r(j,k) = 0.; r.col(k).head(j+1) -= r.col(j).head(j+1) * temp; } l = k; } /* form the full upper triangle of the inverse of (r transpose)*r */ /* in the full upper triangle of r. */ for (k = 0; k <= l; ++k) { for (j = 0; j <= k-1; ++j) r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k); r.col(k).head(k+1) *= r(k,k); } /* form the full lower triangle of the covariance matrix */ /* in the strict lower triangle of r and in wa. */ for (j = 0; j < n; ++j) { jj = ipvt[j]; sing = j > l; for (i = 0; i <= j; ++i) { if (sing) r(i,j) = 0.; ii = ipvt[i]; if (ii > jj) r(ii,jj) = r(i,j); if (ii < jj) r(jj,ii) = r(i,j); } wa[jj] = r(j,j); } /* symmetrize the covariance matrix in r. */ r.topLeftCorner(n,n).template triangularView() = r.topLeftCorner(n,n).transpose(); r.diagonal() = wa; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_LMCOVAR_H RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h0000644000176200001440000001477012253717461026626 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2009 Thomas Capricelli // // This code initially comes from MINPACK whose original authors are: // Copyright Jorge More - Argonne National Laboratory // Copyright Burt Garbow - Argonne National Laboratory // Copyright Ken Hillstrom - Argonne National Laboratory // // This Source Code Form is subject to the terms of the Minpack license // (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. #ifndef EIGEN_LMONESTEP_H #define EIGEN_LMONESTEP_H namespace Eigen { template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeOneStep(FVectorType &x) { using std::abs; using std::sqrt; RealScalar temp, temp1,temp2; RealScalar ratio; RealScalar pnorm, xnorm, fnorm1, actred, dirder, prered; eigen_assert(x.size()==n); // check the caller is not cheating us temp = 0.0; xnorm = 0.0; /* calculate the jacobian matrix. */ Index df_ret = m_functor.df(x, m_fjac); if (df_ret<0) return LevenbergMarquardtSpace::UserAsked; if (df_ret>0) // numerical diff, we evaluated the function df_ret times m_nfev += df_ret; else m_njev++; /* compute the qr factorization of the jacobian. */ for (int j = 0; j < x.size(); ++j) m_wa2(j) = m_fjac.col(j).blueNorm(); QRSolver qrfac(m_fjac); if(qrfac.info() != Success) { m_info = NumericalIssue; return LevenbergMarquardtSpace::ImproperInputParameters; } // Make a copy of the first factor with the associated permutation m_rfactor = qrfac.matrixR(); m_permutation = (qrfac.colsPermutation()); /* on the first iteration and if external scaling is not used, scale according */ /* to the norms of the columns of the initial jacobian. */ if (m_iter == 1) { if (!m_useExternalScaling) for (Index j = 0; j < n; ++j) m_diag[j] = (m_wa2[j]==0.)? 1. : m_wa2[j]; /* on the first iteration, calculate the norm of the scaled x */ /* and initialize the step bound m_delta. */ xnorm = m_diag.cwiseProduct(x).stableNorm(); m_delta = m_factor * xnorm; if (m_delta == 0.) m_delta = m_factor; } /* form (q transpose)*m_fvec and store the first n components in */ /* m_qtf. */ m_wa4 = m_fvec; m_wa4 = qrfac.matrixQ().adjoint() * m_fvec; m_qtf = m_wa4.head(n); /* compute the norm of the scaled gradient. */ m_gnorm = 0.; if (m_fnorm != 0.) for (Index j = 0; j < n; ++j) if (m_wa2[m_permutation.indices()[j]] != 0.) m_gnorm = (std::max)(m_gnorm, abs( m_rfactor.col(j).head(j+1).dot(m_qtf.head(j+1)/m_fnorm) / m_wa2[m_permutation.indices()[j]])); /* test for convergence of the gradient norm. */ if (m_gnorm <= m_gtol) { m_info = Success; return LevenbergMarquardtSpace::CosinusTooSmall; } /* rescale if necessary. */ if (!m_useExternalScaling) m_diag = m_diag.cwiseMax(m_wa2); do { /* determine the levenberg-marquardt parameter. */ internal::lmpar2(qrfac, m_diag, m_qtf, m_delta, m_par, m_wa1); /* store the direction p and x + p. calculate the norm of p. */ m_wa1 = -m_wa1; m_wa2 = x + m_wa1; pnorm = m_diag.cwiseProduct(m_wa1).stableNorm(); /* on the first iteration, adjust the initial step bound. */ if (m_iter == 1) m_delta = (std::min)(m_delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( m_functor(m_wa2, m_wa4) < 0) return LevenbergMarquardtSpace::UserAsked; ++m_nfev; fnorm1 = m_wa4.stableNorm(); /* compute the scaled actual reduction. */ actred = -1.; if (Scalar(.1) * fnorm1 < m_fnorm) actred = 1. - numext::abs2(fnorm1 / m_fnorm); /* compute the scaled predicted reduction and */ /* the scaled directional derivative. 
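(prered = temp1 + 2*temp2 and dirder = -(temp1 + temp2), where temp1 = |R * P^-1 * p|^2 / |f|^2
and temp2 = par * |D*p|^2 / |f|^2, matching MINPACK's lmder)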
*/ m_wa3 = m_rfactor.template triangularView() * (m_permutation.inverse() *m_wa1); temp1 = numext::abs2(m_wa3.stableNorm() / m_fnorm); temp2 = numext::abs2(sqrt(m_par) * pnorm / m_fnorm); prered = temp1 + temp2 / Scalar(.5); dirder = -(temp1 + temp2); /* compute the ratio of the actual to the predicted */ /* reduction. */ ratio = 0.; if (prered != 0.) ratio = actred / prered; /* update the step bound. */ if (ratio <= Scalar(.25)) { if (actred >= 0.) temp = RealScalar(.5); if (actred < 0.) temp = RealScalar(.5) * dirder / (dirder + RealScalar(.5) * actred); if (RealScalar(.1) * fnorm1 >= m_fnorm || temp < RealScalar(.1)) temp = Scalar(.1); /* Computing MIN */ m_delta = temp * (std::min)(m_delta, pnorm / RealScalar(.1)); m_par /= temp; } else if (!(m_par != 0. && ratio < RealScalar(.75))) { m_delta = pnorm / RealScalar(.5); m_par = RealScalar(.5) * m_par; } /* test for successful iteration. */ if (ratio >= RealScalar(1e-4)) { /* successful iteration. update x, m_fvec, and their norms. */ x = m_wa2; m_wa2 = m_diag.cwiseProduct(x); m_fvec = m_wa4; xnorm = m_wa2.stableNorm(); m_fnorm = fnorm1; ++m_iter; } /* tests for convergence. */ if (abs(actred) <= m_ftol && prered <= m_ftol && Scalar(.5) * ratio <= 1. && m_delta <= m_xtol * xnorm) { m_info = Success; return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall; } if (abs(actred) <= m_ftol && prered <= m_ftol && Scalar(.5) * ratio <= 1.) { m_info = Success; return LevenbergMarquardtSpace::RelativeReductionTooSmall; } if (m_delta <= m_xtol * xnorm) { m_info = Success; return LevenbergMarquardtSpace::RelativeErrorTooSmall; } /* tests for termination and stringent tolerances. */ if (m_nfev >= m_maxfev) { m_info = NoConvergence; return LevenbergMarquardtSpace::TooManyFunctionEvaluation; } if (abs(actred) <= NumTraits::epsilon() && prered <= NumTraits::epsilon() && Scalar(.5) * ratio <= 1.) { m_info = Success; return LevenbergMarquardtSpace::FtolTooSmall; } if (m_delta <= NumTraits::epsilon() * xnorm) { m_info = Success; return LevenbergMarquardtSpace::XtolTooSmall; } if (m_gnorm <= NumTraits::epsilon()) { m_info = Success; return LevenbergMarquardtSpace::GtolTooSmall; } } while (ratio < Scalar(1e-4)); return LevenbergMarquardtSpace::Running; } } // end namespace Eigen #endif // EIGEN_LMONESTEP_H RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/0000755000176200001440000000000013100733213024237 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h0000644000176200001440000001240312305236565030307 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud /* NOTE The functions of this file have been adapted from the GMM++ library */ //======================================================================== // // Copyright (C) 2002-2007 Yves Renard // // This file is a part of GETFEM++ // // Getfem++ is free software; you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as // published by the Free Software Foundation; version 2.1 of the License. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. 
// You should have received a copy of the GNU Lesser General Public // License along with this program; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, // USA. // //======================================================================== #include "../../../../Eigen/src/Core/util/NonMPL2.h" #ifndef EIGEN_CONSTRAINEDCG_H #define EIGEN_CONSTRAINEDCG_H #include namespace Eigen { namespace internal { /** \ingroup IterativeSolvers_Module * Compute the pseudo inverse of the non-square matrix C such that * \f$ CINV = (C * C^T)^{-1} * C \f$ based on a conjugate gradient method. * * This function is internally used by constrained_cg. */ template void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV) { // optimisable : copie de la ligne, precalcul de C * trans(C). typedef typename CMatrix::Scalar Scalar; typedef typename CMatrix::Index Index; // FIXME use sparse vectors ? typedef Matrix TmpVec; Index rows = C.rows(), cols = C.cols(); TmpVec d(rows), e(rows), l(cols), p(rows), q(rows), r(rows); Scalar rho, rho_1, alpha; d.setZero(); typedef Triplet T; std::vector tripletList; for (Index i = 0; i < rows; ++i) { d[i] = 1.0; rho = 1.0; e.setZero(); r = d; p = d; while (rho >= 1e-38) { /* conjugate gradient to compute e */ /* which is the i-th row of inv(C * trans(C)) */ l = C.transpose() * p; q = C * l; alpha = rho / p.dot(q); e += alpha * p; r += -alpha * q; rho_1 = rho; rho = r.dot(r); p = (rho/rho_1) * p + r; } l = C.transpose() * e; // l is the i-th row of CINV // FIXME add a generic "prune/filter" expression for both dense and sparse object to sparse for (Index j=0; j void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x, const VectorB& b, const VectorF& f, IterationController &iter) { using std::sqrt; typedef typename TMatrix::Scalar Scalar; typedef typename TMatrix::Index Index; typedef Matrix TmpVec; Scalar rho = 1.0, rho_1, lambda, gamma; Index xSize = x.size(); TmpVec p(xSize), q(xSize), q2(xSize), r(xSize), old_z(xSize), z(xSize), memox(xSize); std::vector satured(C.rows()); p.setZero(); iter.setRhsNorm(sqrt(b.dot(b))); // gael vect_sp(PS, b, b) if (iter.rhsNorm() == 0.0) iter.setRhsNorm(1.0); SparseMatrix CINV(C.rows(), C.cols()); pseudo_inverse(C, CINV); while(true) { // computation of residual old_z = z; memox = x; r = b; r += A * -x; z = r; bool transition = false; for (Index i = 0; i < C.rows(); ++i) { Scalar al = C.row(i).dot(x) - f.coeff(i); if (al >= -1.0E-15) { if (!satured[i]) { satured[i] = true; transition = true; } Scalar bb = CINV.row(i).dot(z); if (bb > 0.0) // FIXME: we should allow that: z += -bb * C.row(i); for (typename CMatrix::InnerIterator it(C,i); it; ++it) z.coeffRef(it.index()) -= bb*it.value(); } else satured[i] = false; } // descent direction rho_1 = rho; rho = r.dot(z); if (iter.finished(rho)) break; if (iter.noiseLevel() > 0 && transition) std::cerr << "CCG: transition\n"; if (transition || iter.first()) gamma = 0.0; else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1); p = z + gamma*p; ++iter; // one dimensionnal optimization q = A * p; lambda = rho / q.dot(p); for (Index i = 0; i < C.rows(); ++i) { if (!satured[i]) { Scalar bb = C.row(i).dot(p) - f[i]; if (bb > 0.0) lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb); } } x += lambda * p; memox -= x; } } } // end namespace internal } // end namespace Eigen #endif // EIGEN_CONSTRAINEDCG_H RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/IterationController.h0000644000176200001440000001235212253717461030434 
0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud /* NOTE The class IterationController has been adapted from the iteration * class of the GMM++ and ITL libraries. */ //======================================================================= // Copyright (C) 1997-2001 // Authors: Andrew Lumsdaine // Lie-Quan Lee // // This file is part of the Iterative Template Library // // You should have received a copy of the License Agreement for the // Iterative Template Library along with the software; see the // file LICENSE. // // Permission to modify the code and to distribute modified code is // granted, provided the text of this NOTICE is retained, a notice that // the code was modified is included with the above COPYRIGHT NOTICE and // with the COPYRIGHT NOTICE in the LICENSE file, and that the LICENSE // file is distributed with the modified code. // // LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. // By way of example, but not limitation, Licensor MAKES NO // REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY // PARTICULAR PURPOSE OR THAT THE USE OF THE LICENSED SOFTWARE COMPONENTS // OR DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS // OR OTHER RIGHTS. //======================================================================= //======================================================================== // // Copyright (C) 2002-2007 Yves Renard // // This file is a part of GETFEM++ // // Getfem++ is free software; you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as // published by the Free Software Foundation; version 2.1 of the License. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this program; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, // USA. // //======================================================================== #include "../../../../Eigen/src/Core/util/NonMPL2.h" #ifndef EIGEN_ITERATION_CONTROLLER_H #define EIGEN_ITERATION_CONTROLLER_H namespace Eigen { /** \ingroup IterativeSolvers_Module * \class IterationController * * \brief Controls the iterations of the iterative solvers * * This class has been adapted from the iteration class of GMM++ and ITL libraries. * */ class IterationController { protected : double m_rhsn; ///< Right hand side norm size_t m_maxiter; ///< Max. 
number of iterations int m_noise; ///< if noise > 0 iterations are printed double m_resmax; ///< maximum residual double m_resminreach, m_resadd; size_t m_nit; ///< iteration number double m_res; ///< last computed residual bool m_written; void (*m_callback)(const IterationController&); public : void init() { m_nit = 0; m_res = 0.0; m_written = false; m_resminreach = 1E50; m_resadd = 0.0; m_callback = 0; } IterationController(double r = 1.0E-8, int noi = 0, size_t mit = size_t(-1)) : m_rhsn(1.0), m_maxiter(mit), m_noise(noi), m_resmax(r) { init(); } void operator ++(int) { m_nit++; m_written = false; m_resadd += m_res; } void operator ++() { (*this)++; } bool first() { return m_nit == 0; } /* get/set the "noisyness" (verbosity) of the solvers */ int noiseLevel() const { return m_noise; } void setNoiseLevel(int n) { m_noise = n; } void reduceNoiseLevel() { if (m_noise > 0) m_noise--; } double maxResidual() const { return m_resmax; } void setMaxResidual(double r) { m_resmax = r; } double residual() const { return m_res; } /* change the user-definable callback, called after each iteration */ void setCallback(void (*t)(const IterationController&)) { m_callback = t; } size_t iteration() const { return m_nit; } void setIteration(size_t i) { m_nit = i; } size_t maxIterarions() const { return m_maxiter; } void setMaxIterations(size_t i) { m_maxiter = i; } double rhsNorm() const { return m_rhsn; } void setRhsNorm(double r) { m_rhsn = r; } bool converged() const { return m_res <= m_rhsn * m_resmax; } bool converged(double nr) { using std::abs; m_res = abs(nr); m_resminreach = (std::min)(m_resminreach, m_res); return converged(); } template bool converged(const VectorType &v) { return converged(v.squaredNorm()); } bool finished(double nr) { if (m_callback) m_callback(*this); if (m_noise > 0 && !m_written) { converged(nr); m_written = true; } return (m_nit >= m_maxiter || converged(nr)); } template bool finished(const MatrixBase &v) { return finished(double(v.squaredNorm())); } }; } // end namespace Eigen #endif // EIGEN_ITERATION_CONTROLLER_H RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/GMRES.h0000644000176200001440000002431213100733213025267 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // Copyright (C) 2012, 2014 Kolja Brix // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GMRES_H #define EIGEN_GMRES_H namespace Eigen { namespace internal { /** * Generalized Minimal Residual Algorithm based on the * Arnoldi algorithm implemented with Householder reflections. * * Parameters: * \param mat matrix of linear system of equations * \param Rhs right hand side vector of linear system of equations * \param x on input: initial guess, on output: solution * \param precond preconditioner used * \param iters on input: maximum number of iterations to perform * on output: number of iterations performed * \param restart number of iterations for a restart * \param tol_error on input: relative residual tolerance * on output: residuum achieved * * \sa IterativeMethods::bicgstab() * * * For references, please see: * * Saad, Y. and Schultz, M. H. * GMRES: A Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems. * SIAM J.Sci.Stat.Comp. 7, 1986, pp. 856 - 869. * * Saad, Y. 
* Iterative Methods for Sparse Linear Systems. * Society for Industrial and Applied Mathematics, Philadelphia, 2003. * * Walker, H. F. * Implementations of the GMRES method. * Comput.Phys.Comm. 53, 1989, pp. 311 - 320. * * Walker, H. F. * Implementation of the GMRES Method using Householder Transformations. * SIAM J.Sci.Stat.Comp. 9, 1988, pp. 152 - 163. * */ template bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond, Index &iters, const Index &restart, typename Dest::RealScalar & tol_error) { using std::sqrt; using std::abs; typedef typename Dest::RealScalar RealScalar; typedef typename Dest::Scalar Scalar; typedef Matrix < Scalar, Dynamic, 1 > VectorType; typedef Matrix < Scalar, Dynamic, Dynamic, ColMajor> FMatrixType; RealScalar tol = tol_error; const Index maxIters = iters; iters = 0; const Index m = mat.rows(); // residual and preconditioned residual VectorType p0 = rhs - mat*x; VectorType r0 = precond.solve(p0); const RealScalar r0Norm = r0.norm(); // is initial guess already good enough? if(r0Norm == 0) { tol_error = 0; return true; } // storage for Hessenberg matrix and Householder data FMatrixType H = FMatrixType::Zero(m, restart + 1); VectorType w = VectorType::Zero(restart + 1); VectorType tau = VectorType::Zero(restart + 1); // storage for Jacobi rotations std::vector < JacobiRotation < Scalar > > G(restart); // storage for temporaries VectorType t(m), v(m), workspace(m), x_new(m); // generate first Householder vector Ref H0_tail = H.col(0).tail(m - 1); RealScalar beta; r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta); w(0) = Scalar(beta); for (Index k = 1; k <= restart; ++k) { ++iters; v = VectorType::Unit(m, k - 1); // apply Householder reflections H_{1} ... H_{k-1} to v // TODO: use a HouseholderSequence for (Index i = k - 1; i >= 0; --i) { v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); } // apply matrix M to v: v = mat * v; t.noalias() = mat * v; v = precond.solve(t); // apply Householder reflections H_{k-1} ... 
H_{1} to v // TODO: use a HouseholderSequence for (Index i = 0; i < k; ++i) { v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); } if (v.tail(m - k).norm() != 0.0) { if (k <= restart) { // generate new Householder vector Ref Hk_tail = H.col(k).tail(m - k - 1); v.tail(m - k).makeHouseholder(Hk_tail, tau.coeffRef(k), beta); // apply Householder reflection H_{k} to v v.tail(m - k).applyHouseholderOnTheLeft(Hk_tail, tau.coeffRef(k), workspace.data()); } } if (k > 1) { for (Index i = 0; i < k - 1; ++i) { // apply old Givens rotations to v v.applyOnTheLeft(i, i + 1, G[i].adjoint()); } } if (k y = w.head(k); H.topLeftCorner(k, k).template triangularView ().solveInPlace(y); // use Horner-like scheme to calculate solution vector x_new.setZero(); for (Index i = k - 1; i >= 0; --i) { x_new(i) += y(i); // apply Householder reflection H_{i} to x_new x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); } x += x_new; if(stop) { return true; } else { k=0; // reset data for restart p0.noalias() = rhs - mat*x; r0 = precond.solve(p0); // clear Hessenberg matrix and Householder data H.setZero(); w.setZero(); tau.setZero(); // generate first Householder vector r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta); w(0) = Scalar(beta); } } } return false; } } template< typename _MatrixType, typename _Preconditioner = DiagonalPreconditioner > class GMRES; namespace internal { template< typename _MatrixType, typename _Preconditioner> struct traits > { typedef _MatrixType MatrixType; typedef _Preconditioner Preconditioner; }; } /** \ingroup IterativeLinearSolvers_Module * \brief A GMRES solver for sparse square problems * * This class allows to solve for A.x = b sparse linear problems using a generalized minimal * residual method. The vectors x and b can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner * * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations * and NumTraits::epsilon() for the tolerance. * * This class can be used as the direct solver classes. Here is a typical usage example: * \code * int n = 10000; * VectorXd x(n), b(n); * SparseMatrix A(n,n); * // fill A and b * GMRES > solver(A); * x = solver.solve(b); * std::cout << "#iterations: " << solver.iterations() << std::endl; * std::cout << "estimated error: " << solver.error() << std::endl; * // update b, and solve again * x = solver.solve(b); * \endcode * * By default the iterations start with x=0 as an initial guess of the solution. * One can control the start using the solveWithGuess() method. * * GMRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. 
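*
* The Arnoldi process is restarted every 30 iterations by default; this length can be
* tuned through set_restart(). An illustrative snippet:
* \code
* GMRES<SparseMatrix<double> > solver(A);
* solver.set_restart(50);   // rebuild the Householder/Arnoldi basis every 50 inner iterations
* \endcode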
* * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ template< typename _MatrixType, typename _Preconditioner> class GMRES : public IterativeSolverBase > { typedef IterativeSolverBase Base; using Base::matrix; using Base::m_error; using Base::m_iterations; using Base::m_info; using Base::m_isInitialized; private: Index m_restart; public: using Base::_solve_impl; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; public: /** Default constructor. */ GMRES() : Base(), m_restart(30) {} /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template explicit GMRES(const EigenBase& A) : Base(A.derived()), m_restart(30) {} ~GMRES() {} /** Get the number of iterations after that a restart is performed. */ Index get_restart() { return m_restart; } /** Set the number of iterations after that a restart is performed. * \param restart number of iterations for a restarti, default is 30. */ void set_restart(const Index restart) { m_restart=restart; } /** \internal */ template void _solve_with_guess_impl(const Rhs& b, Dest& x) const { bool failed = false; for(Index j=0; j void _solve_impl(const Rhs& b, MatrixBase &x) const { x = b; if(x.squaredNorm() == 0) return; // Check Zero right hand side _solve_with_guess_impl(b,x.derived()); } protected: }; } // end namespace Eigen #endif // EIGEN_GMRES_H RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/Scaling.h0000644000176200001440000001315313100733213025773 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Desire NUENTSA WAKAM A; * // fill A and b; * IterScaling > scal; * // Compute the left and right scaling vectors. The matrix is equilibrated at output * scal.computeRef(A); * // Scale the right hand side * b = scal.LeftScaling().cwiseProduct(b); * // Now, solve the equilibrated linear system with any available solver * * // Scale back the computed solution * x = scal.RightScaling().cwiseProduct(x); * \endcode * * \tparam _MatrixType the type of the matrix. It should be a real square sparsematrix * * References : D. Ruiz and B. Ucar, A Symmetry Preserving Algorithm for Matrix Scaling, INRIA Research report RR-7552 * * \sa \ref IncompleteLUT */ template class IterScaling { public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; public: IterScaling() { init(); } IterScaling(const MatrixType& matrix) { init(); compute(matrix); } ~IterScaling() { } /** * Compute the left and right diagonal matrices to scale the input matrix @p mat * * FIXME This algorithm will be modified such that the diagonal elements are permuted on the diagonal. 
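* (Roughly, each sweep divides every nonzero by the square roots of its current row and
* column infinity norms, accumulating the factors into LeftScaling() and RightScaling(),
* and stops once those norms are within m_tol of 1, following the Ruiz and Ucar
* algorithm referenced above.)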
* * \sa LeftScaling() RightScaling() */ void compute (const MatrixType& mat) { using std::abs; int m = mat.rows(); int n = mat.cols(); eigen_assert((m>0 && m == n) && "Please give a non - empty matrix"); m_left.resize(m); m_right.resize(n); m_left.setOnes(); m_right.setOnes(); m_matrix = mat; VectorXd Dr, Dc, DrRes, DcRes; // Temporary Left and right scaling vectors Dr.resize(m); Dc.resize(n); DrRes.resize(m); DcRes.resize(n); double EpsRow = 1.0, EpsCol = 1.0; int its = 0; do { // Iterate until the infinite norm of each row and column is approximately 1 // Get the maximum value in each row and column Dr.setZero(); Dc.setZero(); for (int k=0; km_tol || EpsCol > m_tol) && (its < m_maxits) ); m_isInitialized = true; } /** Compute the left and right vectors to scale the vectors * the input matrix is scaled with the computed vectors at output * * \sa compute() */ void computeRef (MatrixType& mat) { compute (mat); mat = m_matrix; } /** Get the vector to scale the rows of the matrix */ VectorXd& LeftScaling() { return m_left; } /** Get the vector to scale the columns of the matrix */ VectorXd& RightScaling() { return m_right; } /** Set the tolerance for the convergence of the iterative scaling algorithm */ void setTolerance(double tol) { m_tol = tol; } protected: void init() { m_tol = 1e-10; m_maxits = 5; m_isInitialized = false; } MatrixType m_matrix; mutable ComputationInfo m_info; bool m_isInitialized; VectorXd m_left; // Left scaling vector VectorXd m_right; // m_right scaling vector double m_tol; int m_maxits; // Maximum number of iterations allowed }; } #endif RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/DGMRES.h0000644000176200001440000004261513100733213025401 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DGMRES_H #define EIGEN_DGMRES_H #include namespace Eigen { template< typename _MatrixType, typename _Preconditioner = DiagonalPreconditioner > class DGMRES; namespace internal { template< typename _MatrixType, typename _Preconditioner> struct traits > { typedef _MatrixType MatrixType; typedef _Preconditioner Preconditioner; }; /** \brief Computes a permutation vector to have a sorted sequence * \param vec The vector to reorder. * \param perm gives the sorted sequence on output. Must be initialized with 0..n-1 * \param ncut Put the ncut smallest elements at the end of the vector * WARNING This is an expensive sort, so should be used only * for small size vectors * TODO Use modified QuickSplit or std::nth_element to get the smallest values */ template void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::Scalar& ncut) { eigen_assert(vec.size() == perm.size()); typedef typename IndexType::Scalar Index; bool flag; for (Index k = 0; k < ncut; k++) { flag = false; for (Index j = 0; j < vec.size()-1; j++) { if ( vec(perm(j)) < vec(perm(j+1)) ) { std::swap(perm(j),perm(j+1)); flag = true; } if (!flag) break; // The vector is in sorted order } } } } /** * \ingroup IterativeLInearSolvers_Module * \brief A Restarted GMRES with deflation. * This class implements a modification of the GMRES solver for * sparse linear systems. The basis is built with modified * Gram-Schmidt. 
At each restart, a few approximated eigenvectors * corresponding to the smallest eigenvalues are used to build a * preconditioner for the next cycle. This preconditioner * for deflation can be combined with any other preconditioner, * the IncompleteLUT for instance. The preconditioner is applied * at right of the matrix and the combination is multiplicative. * * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner * Typical usage : * \code * SparseMatrix A; * VectorXd x, b; * //Fill A and b ... * DGMRES > solver; * solver.set_restart(30); // Set restarting value * solver.setEigenv(1); // Set the number of eigenvalues to deflate * solver.compute(A); * x = solver.solve(b); * \endcode * * DGMRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. * * References : * [1] D. NUENTSA WAKAM and F. PACULL, Memory Efficient Hybrid * Algebraic Solvers for Linear Systems Arising from Compressible * Flows, Computers and Fluids, In Press, * http://dx.doi.org/10.1016/j.compfluid.2012.03.023 * [2] K. Burrage and J. Erhel, On the performance of various * adaptive preconditioned GMRES strategies, 5(1998), 101-121. * [3] J. Erhel, K. Burrage and B. Pohl, Restarted GMRES * preconditioned by deflation,J. Computational and Applied * Mathematics, 69(1996), 303-318. * */ template< typename _MatrixType, typename _Preconditioner> class DGMRES : public IterativeSolverBase > { typedef IterativeSolverBase Base; using Base::matrix; using Base::m_error; using Base::m_iterations; using Base::m_info; using Base::m_isInitialized; using Base::m_tolerance; public: using Base::_solve_impl; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; typedef Matrix DenseMatrix; typedef Matrix DenseRealMatrix; typedef Matrix DenseVector; typedef Matrix DenseRealVector; typedef Matrix, Dynamic, 1> ComplexVector; /** Default constructor. */ DGMRES() : Base(),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {} /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. 
*/ template explicit DGMRES(const EigenBase& A) : Base(A.derived()), m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {} ~DGMRES() {} /** \internal */ template void _solve_with_guess_impl(const Rhs& b, Dest& x) const { bool failed = false; for(int j=0; j void _solve_impl(const Rhs& b, MatrixBase& x) const { x = b; _solve_with_guess_impl(b,x.derived()); } /** * Get the restart value */ int restart() { return m_restart; } /** * Set the restart value (default is 30) */ void set_restart(const int restart) { m_restart=restart; } /** * Set the number of eigenvalues to deflate at each restart */ void setEigenv(const int neig) { m_neig = neig; if (neig+1 > m_maxNeig) m_maxNeig = neig+1; // To allow for complex conjugates } /** * Get the size of the deflation subspace size */ int deflSize() {return m_r; } /** * Set the maximum size of the deflation subspace */ void setMaxEigenv(const int maxNeig) { m_maxNeig = maxNeig; } protected: // DGMRES algorithm template void dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x, const Preconditioner& precond) const; // Perform one cycle of GMRES template int dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const; // Compute data to use for deflation int dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const; // Apply deflation to a vector template int dgmresApplyDeflation(const RhsType& In, DestType& Out) const; ComplexVector schurValues(const ComplexSchur& schurofH) const; ComplexVector schurValues(const RealSchur& schurofH) const; // Init data for deflation void dgmresInitDeflation(Index& rows) const; mutable DenseMatrix m_V; // Krylov basis vectors mutable DenseMatrix m_H; // Hessenberg matrix mutable DenseMatrix m_Hes; // Initial hessenberg matrix wihout Givens rotations applied mutable Index m_restart; // Maximum size of the Krylov subspace mutable DenseMatrix m_U; // Vectors that form the basis of the invariant subspace mutable DenseMatrix m_MU; // matrix operator applied to m_U (for next cycles) mutable DenseMatrix m_T; /* T=U^T*M^{-1}*A*U */ mutable PartialPivLU m_luT; // LU factorization of m_T mutable StorageIndex m_neig; //Number of eigenvalues to extract at each restart mutable int m_r; // Current number of deflated eigenvalues, size of m_U mutable int m_maxNeig; // Maximum number of eigenvalues to deflate mutable RealScalar m_lambdaN; //Modulus of the largest eigenvalue of A mutable bool m_isDeflAllocated; mutable bool m_isDeflInitialized; //Adaptive strategy mutable RealScalar m_smv; // Smaller multiple of the remaining number of steps allowed mutable bool m_force; // Force the use of deflation at each restart }; /** * \brief Perform several cycles of restarted GMRES with modified Gram Schmidt, * * A right preconditioner is used combined with deflation. 
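  *
  * Note: because the preconditioner is applied on the right, the quantity monitored for
  * convergence, r = b - A x, is the residual of the original (unpreconditioned) system.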
* */ template< typename _MatrixType, typename _Preconditioner> template void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x, const Preconditioner& precond) const { //Initialization int n = mat.rows(); DenseVector r0(n); int nbIts = 0; m_H.resize(m_restart+1, m_restart); m_Hes.resize(m_restart, m_restart); m_V.resize(n,m_restart+1); //Initial residual vector and intial norm x = precond.solve(x); r0 = rhs - mat * x; RealScalar beta = r0.norm(); RealScalar normRhs = rhs.norm(); m_error = beta/normRhs; if(m_error < m_tolerance) m_info = Success; else m_info = NoConvergence; // Iterative process while (nbIts < m_iterations && m_info == NoConvergence) { dgmresCycle(mat, precond, x, r0, beta, normRhs, nbIts); // Compute the new residual vector for the restart if (nbIts < m_iterations && m_info == NoConvergence) r0 = rhs - mat * x; } } /** * \brief Perform one restart cycle of DGMRES * \param mat The coefficient matrix * \param precond The preconditioner * \param x the new approximated solution * \param r0 The initial residual vector * \param beta The norm of the residual computed so far * \param normRhs The norm of the right hand side vector * \param nbIts The number of iterations */ template< typename _MatrixType, typename _Preconditioner> template int DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const { //Initialization DenseVector g(m_restart+1); // Right hand side of the least square problem g.setZero(); g(0) = Scalar(beta); m_V.col(0) = r0/beta; m_info = NoConvergence; std::vector >gr(m_restart); // Givens rotations int it = 0; // Number of inner iterations int n = mat.rows(); DenseVector tv1(n), tv2(n); //Temporary vectors while (m_info == NoConvergence && it < m_restart && nbIts < m_iterations) { // Apply preconditioner(s) at right if (m_isDeflInitialized ) { dgmresApplyDeflation(m_V.col(it), tv1); // Deflation tv2 = precond.solve(tv1); } else { tv2 = precond.solve(m_V.col(it)); // User's selected preconditioner } tv1 = mat * tv2; // Orthogonalize it with the previous basis in the basis using modified Gram-Schmidt Scalar coef; for (int i = 0; i <= it; ++i) { coef = tv1.dot(m_V.col(i)); tv1 = tv1 - coef * m_V.col(i); m_H(i,it) = coef; m_Hes(i,it) = coef; } // Normalize the vector coef = tv1.norm(); m_V.col(it+1) = tv1/coef; m_H(it+1, it) = coef; // m_Hes(it+1,it) = coef; // FIXME Check for happy breakdown // Update Hessenberg matrix with Givens rotations for (int i = 1; i <= it; ++i) { m_H.col(it).applyOnTheLeft(i-1,i,gr[i-1].adjoint()); } // Compute the new plane rotation gr[it].makeGivens(m_H(it, it), m_H(it+1,it)); // Apply the new rotation m_H.col(it).applyOnTheLeft(it,it+1,gr[it].adjoint()); g.applyOnTheLeft(it,it+1, gr[it].adjoint()); beta = std::abs(g(it+1)); m_error = beta/normRhs; // std::cerr << nbIts << " Relative Residual Norm " << m_error << std::endl; it++; nbIts++; if (m_error < m_tolerance) { // The method has converged m_info = Success; break; } } // Compute the new coefficients by solving the least square problem // it++; //FIXME Check first if the matrix is singular ... 
zero diagonal DenseVector nrs(m_restart); nrs = m_H.topLeftCorner(it,it).template triangularView().solve(g.head(it)); // Form the new solution if (m_isDeflInitialized) { tv1 = m_V.leftCols(it) * nrs; dgmresApplyDeflation(tv1, tv2); x = x + precond.solve(tv2); } else x = x + precond.solve(m_V.leftCols(it) * nrs); // Go for a new cycle and compute data for deflation if(nbIts < m_iterations && m_info == NoConvergence && m_neig > 0 && (m_r+m_neig) < m_maxNeig) dgmresComputeDeflationData(mat, precond, it, m_neig); return 0; } template< typename _MatrixType, typename _Preconditioner> void DGMRES<_MatrixType, _Preconditioner>::dgmresInitDeflation(Index& rows) const { m_U.resize(rows, m_maxNeig); m_MU.resize(rows, m_maxNeig); m_T.resize(m_maxNeig, m_maxNeig); m_lambdaN = 0.0; m_isDeflAllocated = true; } template< typename _MatrixType, typename _Preconditioner> inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const ComplexSchur& schurofH) const { return schurofH.matrixT().diagonal(); } template< typename _MatrixType, typename _Preconditioner> inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const RealSchur& schurofH) const { typedef typename MatrixType::Index Index; const DenseMatrix& T = schurofH.matrixT(); Index it = T.rows(); ComplexVector eig(it); Index j = 0; while (j < it-1) { if (T(j+1,j) ==Scalar(0)) { eig(j) = std::complex(T(j,j),RealScalar(0)); j++; } else { eig(j) = std::complex(T(j,j),T(j+1,j)); eig(j+1) = std::complex(T(j,j+1),T(j+1,j+1)); j++; } } if (j < it-1) eig(j) = std::complex(T(j,j),RealScalar(0)); return eig; } template< typename _MatrixType, typename _Preconditioner> int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const { // First, find the Schur form of the Hessenberg matrix H typename internal::conditional::IsComplex, ComplexSchur, RealSchur >::type schurofH; bool computeU = true; DenseMatrix matrixQ(it,it); matrixQ.setIdentity(); schurofH.computeFromHessenberg(m_Hes.topLeftCorner(it,it), matrixQ, computeU); ComplexVector eig(it); Matrixperm(it); eig = this->schurValues(schurofH); // Reorder the absolute values of Schur values DenseRealVector modulEig(it); for (int j=0; j template int DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, DestType &y) const { DenseVector x1 = m_U.leftCols(m_r).transpose() * x; y = x + m_U.leftCols(m_r) * ( m_lambdaN * m_luT.solve(x1) - x1); return 0; } } // end namespace Eigen #endif RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h0000644000176200001440000000473013100733213026754 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
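// A minimal usage sketch of this class as a stand-alone approximate solver. Only the
// compute()/solve() interface defined below is assumed; the matrix A and vector r are
// illustrative placeholders:
//
//   Eigen::IncompleteLU<double> ilu;
//   ilu.compute(A);                      // A: Eigen::SparseMatrix<double>
//   Eigen::VectorXd z = ilu.solve(r);    // applies the approximate inverse U^{-1} L^{-1}
//
// In practice the factorization is usually plugged into an iterative solver as a preconditioner
// rather than used on its own.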
#ifndef EIGEN_INCOMPLETE_LU_H #define EIGEN_INCOMPLETE_LU_H namespace Eigen { template class IncompleteLU : public SparseSolverBase > { protected: typedef SparseSolverBase > Base; using Base::m_isInitialized; typedef _Scalar Scalar; typedef Matrix Vector; typedef typename Vector::Index Index; typedef SparseMatrix FactorType; public: typedef Matrix MatrixType; IncompleteLU() {} template IncompleteLU(const MatrixType& mat) { compute(mat); } Index rows() const { return m_lu.rows(); } Index cols() const { return m_lu.cols(); } template IncompleteLU& compute(const MatrixType& mat) { m_lu = mat; int size = mat.cols(); Vector diag(size); for(int i=0; i void _solve_impl(const Rhs& b, Dest& x) const { x = m_lu.template triangularView().solve(b); x = m_lu.template triangularView().solve(x); } protected: FactorType m_lu; }; } // end namespace Eigen #endif // EIGEN_INCOMPLETE_LU_H RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/MINRES.h0000644000176200001440000003172013100733213025410 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Giacomo Po // Copyright (C) 2011-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MINRES_H_ #define EIGEN_MINRES_H_ namespace Eigen { namespace internal { /** \internal Low-level MINRES algorithm * \param mat The matrix A * \param rhs The right hand side vector b * \param x On input and initial solution, on output the computed solution. * \param precond A right preconditioner being able to efficiently solve for an * approximation of Ax=b (regardless of b) * \param iters On input the max number of iteration, on output the number of performed iterations. * \param tol_error On input the tolerance error, on output an estimation of the relative error. 
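  *
  * Convergence is declared when the (estimated) squared residual satisfies
  * |A x - b|^2 <= tol_error^2 * |b|^2, i.e. the stopping test is on the relative residual.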
*/ template EIGEN_DONT_INLINE void minres(const MatrixType& mat, const Rhs& rhs, Dest& x, const Preconditioner& precond, Index& iters, typename Dest::RealScalar& tol_error) { using std::sqrt; typedef typename Dest::RealScalar RealScalar; typedef typename Dest::Scalar Scalar; typedef Matrix VectorType; // Check for zero rhs const RealScalar rhsNorm2(rhs.squaredNorm()); if(rhsNorm2 == 0) { x.setZero(); iters = 0; tol_error = 0; return; } // initialize const Index maxIters(iters); // initialize maxIters to iters const Index N(mat.cols()); // the size of the matrix const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2) // Initialize preconditioned Lanczos VectorType v_old(N); // will be initialized inside loop VectorType v( VectorType::Zero(N) ); //initialize v VectorType v_new(rhs-mat*x); //initialize v_new RealScalar residualNorm2(v_new.squaredNorm()); VectorType w(N); // will be initialized inside loop VectorType w_new(precond.solve(v_new)); // initialize w_new // RealScalar beta; // will be initialized inside loop RealScalar beta_new2(v_new.dot(w_new)); eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); RealScalar beta_new(sqrt(beta_new2)); const RealScalar beta_one(beta_new); v_new /= beta_new; w_new /= beta_new; // Initialize other variables RealScalar c(1.0); // the cosine of the Givens rotation RealScalar c_old(1.0); RealScalar s(0.0); // the sine of the Givens rotation RealScalar s_old(0.0); // the sine of the Givens rotation VectorType p_oold(N); // will be initialized in loop VectorType p_old(VectorType::Zero(N)); // initialize p_old=0 VectorType p(p_old); // initialize p=0 RealScalar eta(1.0); iters = 0; // reset iters while ( iters < maxIters ) { // Preconditioned Lanczos /* Note that there are 4 variants on the Lanczos algorithm. These are * described in Paige, C. C. (1972). Computational variants of * the Lanczos method for the eigenproblem. IMA Journal of Applied * Mathematics, 10(3), 373–381. The current implementation corresponds * to the case A(2,7) in the paper. It also corresponds to * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear * Systems, 2003 p.173. For the preconditioned version see * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987). 
*/ const RealScalar beta(beta_new); v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter // const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT v = v_new; // update w = w_new; // update // const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT v_new.noalias() = mat*w - beta*v_old; // compute v_new const RealScalar alpha = v_new.dot(w); v_new -= alpha*v; // overwrite v_new w_new = precond.solve(v_new); // overwrite w_new beta_new2 = v_new.dot(w_new); // compute beta_new eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); beta_new = sqrt(beta_new2); // compute beta_new v_new /= beta_new; // overwrite v_new for next iteration w_new /= beta_new; // overwrite w_new for next iteration // Givens rotation const RealScalar r2 =s*alpha+c*c_old*beta; // s, s_old, c and c_old are still from previous iteration const RealScalar r3 =s_old*beta; // s, s_old, c and c_old are still from previous iteration const RealScalar r1_hat=c*alpha-c_old*s*beta; const RealScalar r1 =sqrt( std::pow(r1_hat,2) + std::pow(beta_new,2) ); c_old = c; // store for next iteration s_old = s; // store for next iteration c=r1_hat/r1; // new cosine s=beta_new/r1; // new sine // Update solution p_oold = p_old; // const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT p_old = p; p.noalias()=(w-r2*p_old-r3*p_oold) /r1; // IS NOALIAS REQUIRED? x += beta_one*c*eta*p; /* Update the squared residual. Note that this is the estimated residual. The real residual |Ax-b|^2 may be slightly larger */ residualNorm2 *= s*s; if ( residualNorm2 < threshold2) { break; } eta=-s*eta; // update eta iters++; // increment iteration number (for output purposes) } /* Compute error. Note that this is the estimated error. The real error |Ax-b|/|b| may be slightly larger */ tol_error = std::sqrt(residualNorm2 / rhsNorm2); } } template< typename _MatrixType, int _UpLo=Lower, typename _Preconditioner = IdentityPreconditioner> class MINRES; namespace internal { template< typename _MatrixType, int _UpLo, typename _Preconditioner> struct traits > { typedef _MatrixType MatrixType; typedef _Preconditioner Preconditioner; }; } /** \ingroup IterativeLinearSolvers_Module * \brief A minimal residual solver for sparse symmetric problems * * This class allows to solve for A.x = b sparse linear problems using the MINRES algorithm * of Paige and Saunders (1975). The sparse matrix A must be symmetric (possibly indefinite). * The vectors x and b can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower, * Upper, or Lower|Upper in which the full matrix entries will be considered. Default is Lower. * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner * * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations * and NumTraits::epsilon() for the tolerance. * * This class can be used as the direct solver classes. 
Here is a typical usage example: * \code * int n = 10000; * VectorXd x(n), b(n); * SparseMatrix A(n,n); * // fill A and b * MINRES > mr; * mr.compute(A); * x = mr.solve(b); * std::cout << "#iterations: " << mr.iterations() << std::endl; * std::cout << "estimated error: " << mr.error() << std::endl; * // update b, and solve again * x = mr.solve(b); * \endcode * * By default the iterations start with x=0 as an initial guess of the solution. * One can control the start using the solveWithGuess() method. * * MINRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. * * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ template< typename _MatrixType, int _UpLo, typename _Preconditioner> class MINRES : public IterativeSolverBase > { typedef IterativeSolverBase Base; using Base::matrix; using Base::m_error; using Base::m_iterations; using Base::m_info; using Base::m_isInitialized; public: using Base::_solve_impl; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; enum {UpLo = _UpLo}; public: /** Default constructor. */ MINRES() : Base() {} /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template explicit MINRES(const EigenBase& A) : Base(A.derived()) {} /** Destructor. */ ~MINRES(){} /** \internal */ template void _solve_with_guess_impl(const Rhs& b, Dest& x) const { typedef typename Base::MatrixWrapper MatrixWrapper; typedef typename Base::ActualMatrixType ActualMatrixType; enum { TransposeInput = (!MatrixWrapper::MatrixFree) && (UpLo==(Lower|Upper)) && (!MatrixType::IsRowMajor) && (!NumTraits::IsComplex) }; typedef typename internal::conditional, ActualMatrixType const&>::type RowMajorWrapper; EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY); typedef typename internal::conditional::Type >::type SelfAdjointWrapper; m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; RowMajorWrapper row_mat(matrix()); for(int j=0; j void _solve_impl(const Rhs& b, MatrixBase &x) const { x.setZero(); _solve_with_guess_impl(b,x.derived()); } protected: }; } // end namespace Eigen #endif // EIGEN_MINRES_H RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/0000755000176200001440000000000013235710577024103 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/StemFunction.h0000644000176200001440000000407313100733213026655 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010, 2013 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STEM_FUNCTION #define EIGEN_STEM_FUNCTION namespace Eigen { namespace internal { /** \brief The exponential function (and its derivatives). 
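 *
 * A stem function receives the scalar argument x and a derivative order n and returns the n-th
 * derivative of the underlying scalar function at x; these kernels are what the generic
 * matrix-function code evaluates on the triangular Schur factor. Every derivative of the
 * exponential is the exponential itself, which is why the order argument is ignored here.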
*/ template Scalar stem_function_exp(Scalar x, int) { using std::exp; return exp(x); } /** \brief Cosine (and its derivatives). */ template Scalar stem_function_cos(Scalar x, int n) { using std::cos; using std::sin; Scalar res; switch (n % 4) { case 0: res = std::cos(x); break; case 1: res = -std::sin(x); break; case 2: res = -std::cos(x); break; case 3: res = std::sin(x); break; } return res; } /** \brief Sine (and its derivatives). */ template Scalar stem_function_sin(Scalar x, int n) { using std::cos; using std::sin; Scalar res; switch (n % 4) { case 0: res = std::sin(x); break; case 1: res = std::cos(x); break; case 2: res = -std::sin(x); break; case 3: res = -std::cos(x); break; } return res; } /** \brief Hyperbolic cosine (and its derivatives). */ template Scalar stem_function_cosh(Scalar x, int n) { using std::cosh; using std::sinh; Scalar res; switch (n % 2) { case 0: res = std::cosh(x); break; case 1: res = std::sinh(x); break; } return res; } /** \brief Hyperbolic sine (and its derivatives). */ template Scalar stem_function_sinh(Scalar x, int n) { using std::cosh; using std::sinh; Scalar res; switch (n % 2) { case 0: res = std::sinh(x); break; case 1: res = std::cosh(x); break; } return res; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_STEM_FUNCTION RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h0000644000176200001440000004026613235710577027737 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009, 2010, 2013 Jitse Niesen // Copyright (C) 2011, 2013 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_EXPONENTIAL #define EIGEN_MATRIX_EXPONENTIAL #include "StemFunction.h" namespace Eigen { namespace internal { /** \brief Scaling operator. * * This struct is used by CwiseUnaryOp to scale a matrix by \f$ 2^{-s} \f$. */ template struct MatrixExponentialScalingOp { /** \brief Constructor. * * \param[in] squarings The integer \f$ s \f$ in this document. */ MatrixExponentialScalingOp(int squarings) : m_squarings(squarings) { } /** \brief Scale a matrix coefficient. * * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$. */ inline const RealScalar operator() (const RealScalar& x) const { using std::ldexp; return ldexp(x, -m_squarings); } typedef std::complex ComplexScalar; /** \brief Scale a matrix coefficient. * * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$. */ inline const ComplexScalar operator() (const ComplexScalar& x) const { using std::ldexp; return ComplexScalar(ldexp(x.real(), -m_squarings), ldexp(x.imag(), -m_squarings)); } private: int m_squarings; }; /** \brief Compute the (3,3)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. */ template void matrix_exp_pade3(const MatA& A, MatU& U, MatV& V) { typedef typename MatA::PlainObject MatrixType; typedef typename NumTraits::Scalar>::Real RealScalar; const RealScalar b[] = {120.L, 60.L, 12.L, 1.L}; const MatrixType A2 = A * A; const MatrixType tmp = b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); U.noalias() = A * tmp; V = b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); } /** \brief Compute the (5,5)-Padé approximant to the exponential. 
* * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. */ template void matrix_exp_pade5(const MatA& A, MatU& U, MatV& V) { typedef typename MatA::PlainObject MatrixType; typedef typename NumTraits::Scalar>::Real RealScalar; const RealScalar b[] = {30240.L, 15120.L, 3360.L, 420.L, 30.L, 1.L}; const MatrixType A2 = A * A; const MatrixType A4 = A2 * A2; const MatrixType tmp = b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); U.noalias() = A * tmp; V = b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); } /** \brief Compute the (7,7)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. */ template void matrix_exp_pade7(const MatA& A, MatU& U, MatV& V) { typedef typename MatA::PlainObject MatrixType; typedef typename NumTraits::Scalar>::Real RealScalar; const RealScalar b[] = {17297280.L, 8648640.L, 1995840.L, 277200.L, 25200.L, 1512.L, 56.L, 1.L}; const MatrixType A2 = A * A; const MatrixType A4 = A2 * A2; const MatrixType A6 = A4 * A2; const MatrixType tmp = b[7] * A6 + b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); U.noalias() = A * tmp; V = b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); } /** \brief Compute the (9,9)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. */ template void matrix_exp_pade9(const MatA& A, MatU& U, MatV& V) { typedef typename MatA::PlainObject MatrixType; typedef typename NumTraits::Scalar>::Real RealScalar; const RealScalar b[] = {17643225600.L, 8821612800.L, 2075673600.L, 302702400.L, 30270240.L, 2162160.L, 110880.L, 3960.L, 90.L, 1.L}; const MatrixType A2 = A * A; const MatrixType A4 = A2 * A2; const MatrixType A6 = A4 * A2; const MatrixType A8 = A6 * A2; const MatrixType tmp = b[9] * A8 + b[7] * A6 + b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); U.noalias() = A * tmp; V = b[8] * A8 + b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); } /** \brief Compute the (13,13)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. */ template void matrix_exp_pade13(const MatA& A, MatU& U, MatV& V) { typedef typename MatA::PlainObject MatrixType; typedef typename NumTraits::Scalar>::Real RealScalar; const RealScalar b[] = {64764752532480000.L, 32382376266240000.L, 7771770303897600.L, 1187353796428800.L, 129060195264000.L, 10559470521600.L, 670442572800.L, 33522128640.L, 1323241920.L, 40840800.L, 960960.L, 16380.L, 182.L, 1.L}; const MatrixType A2 = A * A; const MatrixType A4 = A2 * A2; const MatrixType A6 = A4 * A2; V = b[13] * A6 + b[11] * A4 + b[9] * A2; // used for temporary storage MatrixType tmp = A6 * V; tmp += b[7] * A6 + b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); U.noalias() = A * tmp; tmp = b[12] * A6 + b[10] * A4 + b[8] * A2; V.noalias() = A6 * tmp; V += b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); } /** \brief Compute the (17,17)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. * * This function activates only if your long double is double-double or quadruple. 
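 * (The guard below, LDBL_MANT_DIG > 64, excludes the common 53-bit and 64-bit long double
 * formats, so only double-double and quadruple-precision builds compile this approximant.)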
*/ #if LDBL_MANT_DIG > 64 template void matrix_exp_pade17(const MatA& A, MatU& U, MatV& V) { typedef typename MatA::PlainObject MatrixType; typedef typename NumTraits::Scalar>::Real RealScalar; const RealScalar b[] = {830034394580628357120000.L, 415017197290314178560000.L, 100610229646136770560000.L, 15720348382208870400000.L, 1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L, 595373117923584000.L, 27563570274240000.L, 1060137318240000.L, 33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L, 46512.L, 306.L, 1.L}; const MatrixType A2 = A * A; const MatrixType A4 = A2 * A2; const MatrixType A6 = A4 * A2; const MatrixType A8 = A4 * A4; V = b[17] * A8 + b[15] * A6 + b[13] * A4 + b[11] * A2; // used for temporary storage MatrixType tmp = A8 * V; tmp += b[9] * A8 + b[7] * A6 + b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); U.noalias() = A * tmp; tmp = b[16] * A8 + b[14] * A6 + b[12] * A4 + b[10] * A2; V.noalias() = tmp * A8; V += b[8] * A8 + b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); } #endif template ::Scalar>::Real> struct matrix_exp_computeUV { /** \brief Compute Padé approximant to the exponential. * * Computes \c U, \c V and \c squarings such that \f$ (V+U)(V-U)^{-1} \f$ is a Padé * approximant of \f$ \exp(2^{-\mbox{squarings}}M) \f$ around \f$ M = 0 \f$, where \f$ M \f$ * denotes the matrix \c arg. The degree of the Padé approximant and the value of squarings * are chosen such that the approximation error is no more than the round-off error. */ static void run(const MatrixType& arg, MatrixType& U, MatrixType& V, int& squarings); }; template struct matrix_exp_computeUV { template static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings) { using std::frexp; using std::pow; const float l1norm = arg.cwiseAbs().colwise().sum().maxCoeff(); squarings = 0; if (l1norm < 4.258730016922831e-001f) { matrix_exp_pade3(arg, U, V); } else if (l1norm < 1.880152677804762e+000f) { matrix_exp_pade5(arg, U, V); } else { const float maxnorm = 3.925724783138660f; frexp(l1norm / maxnorm, &squarings); if (squarings < 0) squarings = 0; MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); matrix_exp_pade7(A, U, V); } } }; template struct matrix_exp_computeUV { template static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings) { using std::frexp; using std::pow; const double l1norm = arg.cwiseAbs().colwise().sum().maxCoeff(); squarings = 0; if (l1norm < 1.495585217958292e-002) { matrix_exp_pade3(arg, U, V); } else if (l1norm < 2.539398330063230e-001) { matrix_exp_pade5(arg, U, V); } else if (l1norm < 9.504178996162932e-001) { matrix_exp_pade7(arg, U, V); } else if (l1norm < 2.097847961257068e+000) { matrix_exp_pade9(arg, U, V); } else { const double maxnorm = 5.371920351148152; frexp(l1norm / maxnorm, &squarings); if (squarings < 0) squarings = 0; MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); matrix_exp_pade13(A, U, V); } } }; template struct matrix_exp_computeUV { template static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings) { #if LDBL_MANT_DIG == 53 // double precision matrix_exp_computeUV::run(arg, U, V, squarings); #else using std::frexp; using std::pow; const long double l1norm = arg.cwiseAbs().colwise().sum().maxCoeff(); squarings = 0; #if LDBL_MANT_DIG <= 64 // extended precision if (l1norm < 4.1968497232266989671e-003L) { matrix_exp_pade3(arg, U, V); } else if (l1norm < 
1.1848116734693823091e-001L) { matrix_exp_pade5(arg, U, V); } else if (l1norm < 5.5170388480686700274e-001L) { matrix_exp_pade7(arg, U, V); } else if (l1norm < 1.3759868875587845383e+000L) { matrix_exp_pade9(arg, U, V); } else { const long double maxnorm = 4.0246098906697353063L; frexp(l1norm / maxnorm, &squarings); if (squarings < 0) squarings = 0; MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); matrix_exp_pade13(A, U, V); } #elif LDBL_MANT_DIG <= 106 // double-double if (l1norm < 3.2787892205607026992947488108213e-005L) { matrix_exp_pade3(arg, U, V); } else if (l1norm < 6.4467025060072760084130906076332e-003L) { matrix_exp_pade5(arg, U, V); } else if (l1norm < 6.8988028496595374751374122881143e-002L) { matrix_exp_pade7(arg, U, V); } else if (l1norm < 2.7339737518502231741495857201670e-001L) { matrix_exp_pade9(arg, U, V); } else if (l1norm < 1.3203382096514474905666448850278e+000L) { matrix_exp_pade13(arg, U, V); } else { const long double maxnorm = 3.2579440895405400856599663723517L; frexp(l1norm / maxnorm, &squarings); if (squarings < 0) squarings = 0; MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); matrix_exp_pade17(A, U, V); } #elif LDBL_MANT_DIG <= 112 // quadruple precison if (l1norm < 1.639394610288918690547467954466970e-005L) { matrix_exp_pade3(arg, U, V); } else if (l1norm < 4.253237712165275566025884344433009e-003L) { matrix_exp_pade5(arg, U, V); } else if (l1norm < 5.125804063165764409885122032933142e-002L) { matrix_exp_pade7(arg, U, V); } else if (l1norm < 2.170000765161155195453205651889853e-001L) { matrix_exp_pade9(arg, U, V); } else if (l1norm < 1.125358383453143065081397882891878e+000L) { matrix_exp_pade13(arg, U, V); } else { const long double maxnorm = 2.884233277829519311757165057717815L; frexp(l1norm / maxnorm, &squarings); if (squarings < 0) squarings = 0; MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); matrix_exp_pade17(A, U, V); } #else // this case should be handled in compute() eigen_assert(false && "Bug in MatrixExponential"); #endif #endif // LDBL_MANT_DIG } }; template struct is_exp_known_type : false_type {}; template<> struct is_exp_known_type : true_type {}; template<> struct is_exp_known_type : true_type {}; #if LDBL_MANT_DIG <= 112 template<> struct is_exp_known_type : true_type {}; #endif template void matrix_exp_compute(const ArgType& arg, ResultType &result, true_type) // natively supported scalar type { typedef typename ArgType::PlainObject MatrixType; MatrixType U, V; int squarings; matrix_exp_computeUV::run(arg, U, V, squarings); // Pade approximant is (U+V) / (-U+V) MatrixType numer = U + V; MatrixType denom = -U + V; result = denom.partialPivLu().solve(numer); for (int i=0; i void matrix_exp_compute(const ArgType& arg, ResultType &result, false_type) // default { typedef typename ArgType::PlainObject MatrixType; typedef typename traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename std::complex ComplexScalar; result = arg.matrixFunction(internal::stem_function_exp); } } // end namespace Eigen::internal /** \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix exponential of some matrix (expression). * * \tparam Derived Type of the argument to the matrix exponential. * * This class holds the argument to the matrix exponential until it is assigned or evaluated for * some other reason (so the argument should not be changed in the meantime). It is the return type * of MatrixBase::exp() and most of the time this is the only way it is used. 
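 *
 * A minimal usage sketch (it assumes the unsupported MatrixFunctions header is included; the
 * matrix and its values are illustrative only):
 * \code
 * #include <unsupported/Eigen/MatrixFunctions>
 * Eigen::MatrixXd A(2, 2);
 * A << 0, -1,
 *      1,  0;                        // skew-symmetric, so its exponential is a rotation
 * Eigen::MatrixXd expA = A.exp();    // assigning the proxy triggers the actual computation
 * \endcode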
*/ template struct MatrixExponentialReturnValue : public ReturnByValue > { typedef typename Derived::Index Index; public: /** \brief Constructor. * * \param src %Matrix (expression) forming the argument of the matrix exponential. */ MatrixExponentialReturnValue(const Derived& src) : m_src(src) { } /** \brief Compute the matrix exponential. * * \param result the matrix exponential of \p src in the constructor. */ template inline void evalTo(ResultType& result) const { const typename internal::nested_eval::type tmp(m_src); internal::matrix_exp_compute(tmp, result, internal::is_exp_known_type()); } Index rows() const { return m_src.rows(); } Index cols() const { return m_src.cols(); } protected: const typename internal::ref_selector::type m_src; }; namespace internal { template struct traits > { typedef typename Derived::PlainObject ReturnType; }; } template const MatrixExponentialReturnValue MatrixBase::exp() const { eigen_assert(rows() == cols()); return MatrixExponentialReturnValue(derived()); } } // end namespace Eigen #endif // EIGEN_MATRIX_EXPONENTIAL RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h0000644000176200001440000003422513100733213027532 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011, 2013 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_SQUARE_ROOT #define EIGEN_MATRIX_SQUARE_ROOT namespace Eigen { namespace internal { // pre: T.block(i,i,2,2) has complex conjugate eigenvalues // post: sqrtT.block(i,i,2,2) is square root of T.block(i,i,2,2) template void matrix_sqrt_quasi_triangular_2x2_diagonal_block(const MatrixType& T, typename MatrixType::Index i, ResultType& sqrtT) { // TODO: This case (2-by-2 blocks with complex conjugate eigenvalues) is probably hidden somewhere // in EigenSolver. If we expose it, we could call it directly from here. 
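  // The block is diagonalized with EigenSolver, the square roots of its complex conjugate
  // eigenvalues are taken, and the block is reassembled; the imaginary parts cancel (up to
  // rounding), so the real part taken below is a real square root of the 2x2 block.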
typedef typename traits::Scalar Scalar; Matrix block = T.template block<2,2>(i,i); EigenSolver > es(block); sqrtT.template block<2,2>(i,i) = (es.eigenvectors() * es.eigenvalues().cwiseSqrt().asDiagonal() * es.eigenvectors().inverse()).real(); } // pre: block structure of T is such that (i,j) is a 1x1 block, // all blocks of sqrtT to left of and below (i,j) are correct // post: sqrtT(i,j) has the correct value template void matrix_sqrt_quasi_triangular_1x1_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT) { typedef typename traits::Scalar Scalar; Scalar tmp = (sqrtT.row(i).segment(i+1,j-i-1) * sqrtT.col(j).segment(i+1,j-i-1)).value(); sqrtT.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (sqrtT.coeff(i,i) + sqrtT.coeff(j,j)); } // similar to compute1x1offDiagonalBlock() template void matrix_sqrt_quasi_triangular_1x2_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT) { typedef typename traits::Scalar Scalar; Matrix rhs = T.template block<1,2>(i,j); if (j-i > 1) rhs -= sqrtT.block(i, i+1, 1, j-i-1) * sqrtT.block(i+1, j, j-i-1, 2); Matrix A = sqrtT.coeff(i,i) * Matrix::Identity(); A += sqrtT.template block<2,2>(j,j).transpose(); sqrtT.template block<1,2>(i,j).transpose() = A.fullPivLu().solve(rhs.transpose()); } // similar to compute1x1offDiagonalBlock() template void matrix_sqrt_quasi_triangular_2x1_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT) { typedef typename traits::Scalar Scalar; Matrix rhs = T.template block<2,1>(i,j); if (j-i > 2) rhs -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 1); Matrix A = sqrtT.coeff(j,j) * Matrix::Identity(); A += sqrtT.template block<2,2>(i,i); sqrtT.template block<2,1>(i,j) = A.fullPivLu().solve(rhs); } // solves the equation A X + X B = C where all matrices are 2-by-2 template void matrix_sqrt_quasi_triangular_solve_auxiliary_equation(MatrixType& X, const MatrixType& A, const MatrixType& B, const MatrixType& C) { typedef typename traits::Scalar Scalar; Matrix coeffMatrix = Matrix::Zero(); coeffMatrix.coeffRef(0,0) = A.coeff(0,0) + B.coeff(0,0); coeffMatrix.coeffRef(1,1) = A.coeff(0,0) + B.coeff(1,1); coeffMatrix.coeffRef(2,2) = A.coeff(1,1) + B.coeff(0,0); coeffMatrix.coeffRef(3,3) = A.coeff(1,1) + B.coeff(1,1); coeffMatrix.coeffRef(0,1) = B.coeff(1,0); coeffMatrix.coeffRef(0,2) = A.coeff(0,1); coeffMatrix.coeffRef(1,0) = B.coeff(0,1); coeffMatrix.coeffRef(1,3) = A.coeff(0,1); coeffMatrix.coeffRef(2,0) = A.coeff(1,0); coeffMatrix.coeffRef(2,3) = B.coeff(1,0); coeffMatrix.coeffRef(3,1) = A.coeff(1,0); coeffMatrix.coeffRef(3,2) = B.coeff(0,1); Matrix rhs; rhs.coeffRef(0) = C.coeff(0,0); rhs.coeffRef(1) = C.coeff(0,1); rhs.coeffRef(2) = C.coeff(1,0); rhs.coeffRef(3) = C.coeff(1,1); Matrix result; result = coeffMatrix.fullPivLu().solve(rhs); X.coeffRef(0,0) = result.coeff(0); X.coeffRef(0,1) = result.coeff(1); X.coeffRef(1,0) = result.coeff(2); X.coeffRef(1,1) = result.coeff(3); } // similar to compute1x1offDiagonalBlock() template void matrix_sqrt_quasi_triangular_2x2_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT) { typedef typename traits::Scalar Scalar; Matrix A = sqrtT.template block<2,2>(i,i); Matrix B = sqrtT.template block<2,2>(j,j); Matrix C = T.template block<2,2>(i,j); if (j-i > 2) C -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 2); Matrix X; 
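  // Solve the 2x2 Sylvester equation A*X + X*B = C for the off-diagonal block X.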
matrix_sqrt_quasi_triangular_solve_auxiliary_equation(X, A, B, C); sqrtT.template block<2,2>(i,j) = X; } // pre: T is quasi-upper-triangular and sqrtT is a zero matrix of the same size // post: the diagonal blocks of sqrtT are the square roots of the diagonal blocks of T template void matrix_sqrt_quasi_triangular_diagonal(const MatrixType& T, ResultType& sqrtT) { using std::sqrt; typedef typename MatrixType::Index Index; const Index size = T.rows(); for (Index i = 0; i < size; i++) { if (i == size - 1 || T.coeff(i+1, i) == 0) { eigen_assert(T(i,i) >= 0); sqrtT.coeffRef(i,i) = sqrt(T.coeff(i,i)); } else { matrix_sqrt_quasi_triangular_2x2_diagonal_block(T, i, sqrtT); ++i; } } } // pre: T is quasi-upper-triangular and diagonal blocks of sqrtT are square root of diagonal blocks of T. // post: sqrtT is the square root of T. template void matrix_sqrt_quasi_triangular_off_diagonal(const MatrixType& T, ResultType& sqrtT) { typedef typename MatrixType::Index Index; const Index size = T.rows(); for (Index j = 1; j < size; j++) { if (T.coeff(j, j-1) != 0) // if T(j-1:j, j-1:j) is a 2-by-2 block continue; for (Index i = j-1; i >= 0; i--) { if (i > 0 && T.coeff(i, i-1) != 0) // if T(i-1:i, i-1:i) is a 2-by-2 block continue; bool iBlockIs2x2 = (i < size - 1) && (T.coeff(i+1, i) != 0); bool jBlockIs2x2 = (j < size - 1) && (T.coeff(j+1, j) != 0); if (iBlockIs2x2 && jBlockIs2x2) matrix_sqrt_quasi_triangular_2x2_off_diagonal_block(T, i, j, sqrtT); else if (iBlockIs2x2 && !jBlockIs2x2) matrix_sqrt_quasi_triangular_2x1_off_diagonal_block(T, i, j, sqrtT); else if (!iBlockIs2x2 && jBlockIs2x2) matrix_sqrt_quasi_triangular_1x2_off_diagonal_block(T, i, j, sqrtT); else if (!iBlockIs2x2 && !jBlockIs2x2) matrix_sqrt_quasi_triangular_1x1_off_diagonal_block(T, i, j, sqrtT); } } } } // end of namespace internal /** \ingroup MatrixFunctions_Module * \brief Compute matrix square root of quasi-triangular matrix. * * \tparam MatrixType type of \p arg, the argument of matrix square root, * expected to be an instantiation of the Matrix class template. * \tparam ResultType type of \p result, where result is to be stored. * \param[in] arg argument of matrix square root. * \param[out] result matrix square root of upper Hessenberg part of \p arg. * * This function computes the square root of the upper quasi-triangular matrix stored in the upper * Hessenberg part of \p arg. Only the upper Hessenberg part of \p result is updated, the rest is * not touched. See MatrixBase::sqrt() for details on how this computation is implemented. * * \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular */ template void matrix_sqrt_quasi_triangular(const MatrixType &arg, ResultType &result) { eigen_assert(arg.rows() == arg.cols()); result.resize(arg.rows(), arg.cols()); internal::matrix_sqrt_quasi_triangular_diagonal(arg, result); internal::matrix_sqrt_quasi_triangular_off_diagonal(arg, result); } /** \ingroup MatrixFunctions_Module * \brief Compute matrix square root of triangular matrix. * * \tparam MatrixType type of \p arg, the argument of matrix square root, * expected to be an instantiation of the Matrix class template. * \tparam ResultType type of \p result, where result is to be stored. * \param[in] arg argument of matrix square root. * \param[out] result matrix square root of upper triangular part of \p arg. * * Only the upper triangular part (including the diagonal) of \p result is updated, the rest is not * touched. See MatrixBase::sqrt() for details on how this computation is implemented. 
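 *
 * The implementation below uses the standard recurrence for triangular square roots:
 * result(i,i) = sqrt(arg(i,i)) on the diagonal and, for i < j,
 * result(i,j) = (arg(i,j) - sum_{k=i+1..j-1} result(i,k)*result(k,j)) / (result(i,i) + result(j,j)).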
* * \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular */ template void matrix_sqrt_triangular(const MatrixType &arg, ResultType &result) { using std::sqrt; typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; eigen_assert(arg.rows() == arg.cols()); // Compute square root of arg and store it in upper triangular part of result // This uses that the square root of triangular matrices can be computed directly. result.resize(arg.rows(), arg.cols()); for (Index i = 0; i < arg.rows(); i++) { result.coeffRef(i,i) = sqrt(arg.coeff(i,i)); } for (Index j = 1; j < arg.cols(); j++) { for (Index i = j-1; i >= 0; i--) { // if i = j-1, then segment has length 0 so tmp = 0 Scalar tmp = (result.row(i).segment(i+1,j-i-1) * result.col(j).segment(i+1,j-i-1)).value(); // denominator may be zero if original matrix is singular result.coeffRef(i,j) = (arg.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j)); } } } namespace internal { /** \ingroup MatrixFunctions_Module * \brief Helper struct for computing matrix square roots of general matrices. * \tparam MatrixType type of the argument of the matrix square root, * expected to be an instantiation of the Matrix class template. * * \sa MatrixSquareRootTriangular, MatrixSquareRootQuasiTriangular, MatrixBase::sqrt() */ template ::Scalar>::IsComplex> struct matrix_sqrt_compute { /** \brief Compute the matrix square root * * \param[in] arg matrix whose square root is to be computed. * \param[out] result square root of \p arg. * * See MatrixBase::sqrt() for details on how this computation is implemented. */ template static void run(const MatrixType &arg, ResultType &result); }; // ********** Partial specialization for real matrices ********** template struct matrix_sqrt_compute { template static void run(const MatrixType &arg, ResultType &result) { eigen_assert(arg.rows() == arg.cols()); // Compute Schur decomposition of arg const RealSchur schurOfA(arg); const MatrixType& T = schurOfA.matrixT(); const MatrixType& U = schurOfA.matrixU(); // Compute square root of T MatrixType sqrtT = MatrixType::Zero(arg.rows(), arg.cols()); matrix_sqrt_quasi_triangular(T, sqrtT); // Compute square root of arg result = U * sqrtT * U.adjoint(); } }; // ********** Partial specialization for complex matrices ********** template struct matrix_sqrt_compute { template static void run(const MatrixType &arg, ResultType &result) { eigen_assert(arg.rows() == arg.cols()); // Compute Schur decomposition of arg const ComplexSchur schurOfA(arg); const MatrixType& T = schurOfA.matrixT(); const MatrixType& U = schurOfA.matrixU(); // Compute square root of T MatrixType sqrtT; matrix_sqrt_triangular(T, sqrtT); // Compute square root of arg result = U * (sqrtT.template triangularView() * U.adjoint()); } }; } // end namespace internal /** \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix square root of some matrix (expression). * * \tparam Derived Type of the argument to the matrix square root. * * This class holds the argument to the matrix square root until it * is assigned or evaluated for some other reason (so the argument * should not be changed in the meantime). It is the return type of * MatrixBase::sqrt() and most of the time this is the only way it is * used. */ template class MatrixSquareRootReturnValue : public ReturnByValue > { protected: typedef typename Derived::Index Index; typedef typename internal::ref_selector::type DerivedNested; public: /** \brief Constructor. 
* * \param[in] src %Matrix (expression) forming the argument of the * matrix square root. */ explicit MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { } /** \brief Compute the matrix square root. * * \param[out] result the matrix square root of \p src in the * constructor. */ template inline void evalTo(ResultType& result) const { typedef typename internal::nested_eval::type DerivedEvalType; typedef typename internal::remove_all::type DerivedEvalTypeClean; DerivedEvalType tmp(m_src); internal::matrix_sqrt_compute::run(tmp, result); } Index rows() const { return m_src.rows(); } Index cols() const { return m_src.cols(); } protected: const DerivedNested m_src; }; namespace internal { template struct traits > { typedef typename Derived::PlainObject ReturnType; }; } template const MatrixSquareRootReturnValue MatrixBase::sqrt() const { eigen_assert(rows() == cols()); return MatrixSquareRootReturnValue(derived()); } } // end namespace Eigen #endif // EIGEN_MATRIX_FUNCTION RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h0000644000176200001440000004202113235710577027366 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011, 2013 Jitse Niesen // Copyright (C) 2011 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_LOGARITHM #define EIGEN_MATRIX_LOGARITHM namespace Eigen { namespace internal { template struct matrix_log_min_pade_degree { static const int value = 3; }; template struct matrix_log_max_pade_degree { typedef typename NumTraits::Real RealScalar; static const int value = std::numeric_limits::digits<= 24? 5: // single precision std::numeric_limits::digits<= 53? 7: // double precision std::numeric_limits::digits<= 64? 8: // extended precision std::numeric_limits::digits<=106? 10: // double-double 11; // quadruple precision }; /** \brief Compute logarithm of 2x2 triangular matrix. */ template void matrix_log_compute_2x2(const MatrixType& A, MatrixType& result) { typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; using std::abs; using std::ceil; using std::imag; using std::log; Scalar logA00 = log(A(0,0)); Scalar logA11 = log(A(1,1)); result(0,0) = logA00; result(1,0) = Scalar(0); result(1,1) = logA11; Scalar y = A(1,1) - A(0,0); if (y==Scalar(0)) { result(0,1) = A(0,1) / A(0,0); } else if ((abs(A(0,0)) < RealScalar(0.5)*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) { result(0,1) = A(0,1) * (logA11 - logA00) / y; } else { // computation in previous branch is inaccurate if A(1,1) \approx A(0,0) int unwindingNumber = static_cast(ceil((imag(logA11 - logA00) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI))); result(0,1) = A(0,1) * (numext::log1p(y/A(0,0)) + Scalar(0,2*EIGEN_PI*unwindingNumber)) / y; } } /* \brief Get suitable degree for Pade approximation. 
(specialized for RealScalar = float) */ inline int matrix_log_get_pade_degree(float normTminusI) { const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1, 5.3149729967117310e-1 }; const int minPadeDegree = matrix_log_min_pade_degree::value; const int maxPadeDegree = matrix_log_max_pade_degree::value; int degree = minPadeDegree; for (; degree <= maxPadeDegree; ++degree) if (normTminusI <= maxNormForPade[degree - minPadeDegree]) break; return degree; } /* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */ inline int matrix_log_get_pade_degree(double normTminusI) { const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2, 1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 }; const int minPadeDegree = matrix_log_min_pade_degree::value; const int maxPadeDegree = matrix_log_max_pade_degree::value; int degree = minPadeDegree; for (; degree <= maxPadeDegree; ++degree) if (normTminusI <= maxNormForPade[degree - minPadeDegree]) break; return degree; } /* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double) */ inline int matrix_log_get_pade_degree(long double normTminusI) { #if LDBL_MANT_DIG == 53 // double precision const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L, 1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L }; #elif LDBL_MANT_DIG <= 64 // extended precision const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L, 5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L, 2.32777776523703892094e-1L }; #elif LDBL_MANT_DIG <= 106 // double-double const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */, 9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L, 1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L, 4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L, 1.05026503471351080481093652651105e-1L }; #else // quadruple precision const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */, 5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L, 8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L, 3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L, 8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L }; #endif const int minPadeDegree = matrix_log_min_pade_degree::value; const int maxPadeDegree = matrix_log_max_pade_degree::value; int degree = minPadeDegree; for (; degree <= maxPadeDegree; ++degree) if (normTminusI <= maxNormForPade[degree - minPadeDegree]) break; return degree; } /* \brief Compute Pade approximation to matrix logarithm */ template void matrix_log_compute_pade(MatrixType& result, const MatrixType& T, int degree) { typedef typename NumTraits::Real RealScalar; const int minPadeDegree = 3; const int maxPadeDegree = 11; assert(degree >= minPadeDegree && degree <= maxPadeDegree); const RealScalar nodes[][maxPadeDegree] = { { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L, // degree 3 0.8872983346207416885179265399782400L }, { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L, // degree 4 0.6699905217924281324013328795516223L, 
0.9305681557970262876119732444464048L }, { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L, // degree 5 0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L, 0.9530899229693319963988134391496965L }, { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L, // degree 6 0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L, 0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L }, { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L, // degree 7 0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L, 0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L, 0.9745539561713792622630948420239256L }, { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L, // degree 8 0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L, 0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L, 0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L }, { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L, // degree 9 0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L, 0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L, 0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L, 0.9840801197538130449177881014518364L }, { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L, // degree 10 0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L, 0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L, 0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L, 0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L }, { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L, // degree 11 0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L, 0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L, 0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L, 0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L, 0.9891143290730284964019690005614287L } }; const RealScalar weights[][maxPadeDegree] = { { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L, // degree 3 0.2777777777777777777777777777777778L }, { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L, // degree 4 0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L }, { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L, // degree 5 0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L, 0.1184634425280945437571320203599587L }, { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L, // degree 6 0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L, 0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L }, { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L, // degree 7 0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L, 0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L, 0.0647424830844348466353057163395410L }, { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L, // degree 8 
0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L, 0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L, 0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L }, { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L, // degree 9 0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L, 0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L, 0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L, 0.0406371941807872059859460790552618L }, { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L, // degree 10 0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L, 0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L, 0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L, 0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L }, { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L, // degree 11 0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L, 0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L, 0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L, 0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L, 0.0278342835580868332413768602212743L } }; MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); result.setZero(T.rows(), T.rows()); for (int k = 0; k < degree; ++k) { RealScalar weight = weights[degree-minPadeDegree][k]; RealScalar node = nodes[degree-minPadeDegree][k]; result += weight * (MatrixType::Identity(T.rows(), T.rows()) + node * TminusI) .template triangularView().solve(TminusI); } } /** \brief Compute logarithm of triangular matrices with size > 2. * \details This uses a inverse scale-and-square algorithm. */ template void matrix_log_compute_big(const MatrixType& A, MatrixType& result) { typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; using std::pow; int numberOfSquareRoots = 0; int numberOfExtraSquareRoots = 0; int degree; MatrixType T = A, sqrtT; int maxPadeDegree = matrix_log_max_pade_degree::value; const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1L: // single precision maxPadeDegree<= 7? 2.6429608311114350e-1L: // double precision maxPadeDegree<= 8? 2.32777776523703892094e-1L: // extended precision maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L: // double-double 1.1880960220216759245467951592883642e-1L; // quadruple precision while (true) { RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff(); if (normTminusI < maxNormForPade) { degree = matrix_log_get_pade_degree(normTminusI); int degree2 = matrix_log_get_pade_degree(normTminusI / RealScalar(2)); if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1)) break; ++numberOfExtraSquareRoots; } matrix_sqrt_triangular(T, sqrtT); T = sqrtT.template triangularView(); ++numberOfSquareRoots; } matrix_log_compute_pade(result, T, degree); result *= pow(RealScalar(2), numberOfSquareRoots); } /** \ingroup MatrixFunctions_Module * \class MatrixLogarithmAtomic * \brief Helper class for computing matrix logarithm of atomic matrices. * * Here, an atomic matrix is a triangular matrix whose diagonal entries are close to each other. 
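 *
 * Illustrative usage sketch (comment added for clarity; the example matrix and the
 * include line are assumptions, not taken from the original header). Users normally
 * reach this helper through MatrixBase::log(); the atomic computation below is only
 * applied internally to the triangular diagonal blocks produced by the Schur-Parlett
 * machinery.
 * \code
 * #include <unsupported/Eigen/MatrixFunctions>
 * Eigen::MatrixXd A(3, 3);
 * A << 4, 1, 0,
 *      0, 4, 1,
 *      0, 0, 4;                    // no eigenvalue on the closed negative real axis
 * Eigen::MatrixXd L = A.log();     // matrix logarithm
 * // L.exp() reproduces A up to rounding error
 * \endcode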
* * \sa class MatrixFunctionAtomic, MatrixBase::log() */ template class MatrixLogarithmAtomic { public: /** \brief Compute matrix logarithm of atomic matrix * \param[in] A argument of matrix logarithm, should be upper triangular and atomic * \returns The logarithm of \p A. */ MatrixType compute(const MatrixType& A); }; template MatrixType MatrixLogarithmAtomic::compute(const MatrixType& A) { using std::log; MatrixType result(A.rows(), A.rows()); if (A.rows() == 1) result(0,0) = log(A(0,0)); else if (A.rows() == 2) matrix_log_compute_2x2(A, result); else matrix_log_compute_big(A, result); return result; } } // end of namespace internal /** \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix logarithm of some matrix (expression). * * \tparam Derived Type of the argument to the matrix function. * * This class holds the argument to the matrix function until it is * assigned or evaluated for some other reason (so the argument * should not be changed in the meantime). It is the return type of * MatrixBase::log() and most of the time this is the only way it * is used. */ template class MatrixLogarithmReturnValue : public ReturnByValue > { public: typedef typename Derived::Scalar Scalar; typedef typename Derived::Index Index; protected: typedef typename internal::ref_selector::type DerivedNested; public: /** \brief Constructor. * * \param[in] A %Matrix (expression) forming the argument of the matrix logarithm. */ explicit MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { } /** \brief Compute the matrix logarithm. * * \param[out] result Logarithm of \p A, where \A is as specified in the constructor. */ template inline void evalTo(ResultType& result) const { typedef typename internal::nested_eval::type DerivedEvalType; typedef typename internal::remove_all::type DerivedEvalTypeClean; typedef internal::traits Traits; static const int RowsAtCompileTime = Traits::RowsAtCompileTime; static const int ColsAtCompileTime = Traits::ColsAtCompileTime; typedef std::complex::Real> ComplexScalar; typedef Matrix DynMatrixType; typedef internal::MatrixLogarithmAtomic AtomicType; AtomicType atomic; internal::matrix_function_compute::run(m_A, atomic, result); } Index rows() const { return m_A.rows(); } Index cols() const { return m_A.cols(); } private: const DerivedNested m_A; }; namespace internal { template struct traits > { typedef typename Derived::PlainObject ReturnType; }; } /********** MatrixBase method **********/ template const MatrixLogarithmReturnValue MatrixBase::log() const { eigen_assert(rows() == cols()); return MatrixLogarithmReturnValue(derived()); } } // end namespace Eigen #endif // EIGEN_MATRIX_LOGARITHM RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h0000644000176200001440000005570513100733213026530 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012, 2013 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_POWER #define EIGEN_MATRIX_POWER namespace Eigen { template class MatrixPower; /** * \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix power of some matrix. * * \tparam MatrixType type of the base, a matrix. 
* * This class holds the arguments to the matrix power until it is * assigned or evaluated for some other reason (so the argument * should not be changed in the meantime). It is the return type of * MatrixPower::operator() and related functions and most of the * time this is the only way it is used. */ /* TODO This class is only used by MatrixPower, so it should be nested * into MatrixPower, like MatrixPower::ReturnValue. However, my * compiler complained about unused template parameter in the * following declaration in namespace internal. * * template * struct traits::ReturnValue>; */ template class MatrixPowerParenthesesReturnValue : public ReturnByValue< MatrixPowerParenthesesReturnValue > { public: typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; /** * \brief Constructor. * * \param[in] pow %MatrixPower storing the base. * \param[in] p scalar, the exponent of the matrix power. */ MatrixPowerParenthesesReturnValue(MatrixPower& pow, RealScalar p) : m_pow(pow), m_p(p) { } /** * \brief Compute the matrix power. * * \param[out] result */ template inline void evalTo(ResultType& res) const { m_pow.compute(res, m_p); } Index rows() const { return m_pow.rows(); } Index cols() const { return m_pow.cols(); } private: MatrixPower& m_pow; const RealScalar m_p; }; /** * \ingroup MatrixFunctions_Module * * \brief Class for computing matrix powers. * * \tparam MatrixType type of the base, expected to be an instantiation * of the Matrix class template. * * This class is capable of computing triangular real/complex matrices * raised to a power in the interval \f$ (-1, 1) \f$. * * \note Currently this class is only used by MatrixPower. One may * insist that this be nested into MatrixPower. This class is here to * faciliate future development of triangular matrix functions. */ template class MatrixPowerAtomic : internal::noncopyable { private: enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef std::complex ComplexScalar; typedef typename MatrixType::Index Index; typedef Block ResultType; const MatrixType& m_A; RealScalar m_p; void computePade(int degree, const MatrixType& IminusT, ResultType& res) const; void compute2x2(ResultType& res, RealScalar p) const; void computeBig(ResultType& res) const; static int getPadeDegree(float normIminusT); static int getPadeDegree(double normIminusT); static int getPadeDegree(long double normIminusT); static ComplexScalar computeSuperDiag(const ComplexScalar&, const ComplexScalar&, RealScalar p); static RealScalar computeSuperDiag(RealScalar, RealScalar, RealScalar p); public: /** * \brief Constructor. * * \param[in] T the base of the matrix power. * \param[in] p the exponent of the matrix power, should be in * \f$ (-1, 1) \f$. * * The class stores a reference to T, so it should not be changed * (or destroyed) before evaluation. Only the upper triangular * part of T is read. */ MatrixPowerAtomic(const MatrixType& T, RealScalar p); /** * \brief Compute the matrix power. * * \param[out] res \f$ A^p \f$ where A and p are specified in the * constructor. 
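 *
 * Added summary comment (paraphrasing the implementation that follows; not part of the
 * original header): for bases larger than 2x2, the triangular matrix T is repeatedly
 * square-rooted until \f$ \| I - T \| \f$ is small enough for a Pade approximant of the
 * selected degree, computePade() evaluates that approximant by a backward recursion of
 * triangular solves, and the effect of the square roots is then undone by repeated
 * squaring, with compute2x2() re-evaluating the diagonal and first superdiagonal from
 * scalar powers at each step.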
*/ void compute(ResultType& res) const; }; template MatrixPowerAtomic::MatrixPowerAtomic(const MatrixType& T, RealScalar p) : m_A(T), m_p(p) { eigen_assert(T.rows() == T.cols()); eigen_assert(p > -1 && p < 1); } template void MatrixPowerAtomic::compute(ResultType& res) const { using std::pow; switch (m_A.rows()) { case 0: break; case 1: res(0,0) = pow(m_A(0,0), m_p); break; case 2: compute2x2(res, m_p); break; default: computeBig(res); } } template void MatrixPowerAtomic::computePade(int degree, const MatrixType& IminusT, ResultType& res) const { int i = 2*degree; res = (m_p-degree) / (2*i-2) * IminusT; for (--i; i; --i) { res = (MatrixType::Identity(IminusT.rows(), IminusT.cols()) + res).template triangularView() .solve((i==1 ? -m_p : i&1 ? (-m_p-i/2)/(2*i) : (m_p-i/2)/(2*i-2)) * IminusT).eval(); } res += MatrixType::Identity(IminusT.rows(), IminusT.cols()); } // This function assumes that res has the correct size (see bug 614) template void MatrixPowerAtomic::compute2x2(ResultType& res, RealScalar p) const { using std::abs; using std::pow; res.coeffRef(0,0) = pow(m_A.coeff(0,0), p); for (Index i=1; i < m_A.cols(); ++i) { res.coeffRef(i,i) = pow(m_A.coeff(i,i), p); if (m_A.coeff(i-1,i-1) == m_A.coeff(i,i)) res.coeffRef(i-1,i) = p * pow(m_A.coeff(i,i), p-1); else if (2*abs(m_A.coeff(i-1,i-1)) < abs(m_A.coeff(i,i)) || 2*abs(m_A.coeff(i,i)) < abs(m_A.coeff(i-1,i-1))) res.coeffRef(i-1,i) = (res.coeff(i,i)-res.coeff(i-1,i-1)) / (m_A.coeff(i,i)-m_A.coeff(i-1,i-1)); else res.coeffRef(i-1,i) = computeSuperDiag(m_A.coeff(i,i), m_A.coeff(i-1,i-1), p); res.coeffRef(i-1,i) *= m_A.coeff(i-1,i); } } template void MatrixPowerAtomic::computeBig(ResultType& res) const { using std::ldexp; const int digits = std::numeric_limits::digits; const RealScalar maxNormForPade = digits <= 24? 4.3386528e-1L // single precision : digits <= 53? 2.789358995219730e-1L // double precision : digits <= 64? 2.4471944416607995472e-1L // extended precision : digits <= 106? 
1.1016843812851143391275867258512e-1L // double-double : 9.134603732914548552537150753385375e-2L; // quadruple precision MatrixType IminusT, sqrtT, T = m_A.template triangularView(); RealScalar normIminusT; int degree, degree2, numberOfSquareRoots = 0; bool hasExtraSquareRoot = false; for (Index i=0; i < m_A.cols(); ++i) eigen_assert(m_A(i,i) != RealScalar(0)); while (true) { IminusT = MatrixType::Identity(m_A.rows(), m_A.cols()) - T; normIminusT = IminusT.cwiseAbs().colwise().sum().maxCoeff(); if (normIminusT < maxNormForPade) { degree = getPadeDegree(normIminusT); degree2 = getPadeDegree(normIminusT/2); if (degree - degree2 <= 1 || hasExtraSquareRoot) break; hasExtraSquareRoot = true; } matrix_sqrt_triangular(T, sqrtT); T = sqrtT.template triangularView(); ++numberOfSquareRoots; } computePade(degree, IminusT, res); for (; numberOfSquareRoots; --numberOfSquareRoots) { compute2x2(res, ldexp(m_p, -numberOfSquareRoots)); res = res.template triangularView() * res; } compute2x2(res, m_p); } template inline int MatrixPowerAtomic::getPadeDegree(float normIminusT) { const float maxNormForPade[] = { 2.8064004e-1f /* degree = 3 */ , 4.3386528e-1f }; int degree = 3; for (; degree <= 4; ++degree) if (normIminusT <= maxNormForPade[degree - 3]) break; return degree; } template inline int MatrixPowerAtomic::getPadeDegree(double normIminusT) { const double maxNormForPade[] = { 1.884160592658218e-2 /* degree = 3 */ , 6.038881904059573e-2, 1.239917516308172e-1, 1.999045567181744e-1, 2.789358995219730e-1 }; int degree = 3; for (; degree <= 7; ++degree) if (normIminusT <= maxNormForPade[degree - 3]) break; return degree; } template inline int MatrixPowerAtomic::getPadeDegree(long double normIminusT) { #if LDBL_MANT_DIG == 53 const int maxPadeDegree = 7; const double maxNormForPade[] = { 1.884160592658218e-2L /* degree = 3 */ , 6.038881904059573e-2L, 1.239917516308172e-1L, 1.999045567181744e-1L, 2.789358995219730e-1L }; #elif LDBL_MANT_DIG <= 64 const int maxPadeDegree = 8; const long double maxNormForPade[] = { 6.3854693117491799460e-3L /* degree = 3 */ , 2.6394893435456973676e-2L, 6.4216043030404063729e-2L, 1.1701165502926694307e-1L, 1.7904284231268670284e-1L, 2.4471944416607995472e-1L }; #elif LDBL_MANT_DIG <= 106 const int maxPadeDegree = 10; const double maxNormForPade[] = { 1.0007161601787493236741409687186e-4L /* degree = 3 */ , 1.0007161601787493236741409687186e-3L, 4.7069769360887572939882574746264e-3L, 1.3220386624169159689406653101695e-2L, 2.8063482381631737920612944054906e-2L, 4.9625993951953473052385361085058e-2L, 7.7367040706027886224557538328171e-2L, 1.1016843812851143391275867258512e-1L }; #else const int maxPadeDegree = 10; const double maxNormForPade[] = { 5.524506147036624377378713555116378e-5L /* degree = 3 */ , 6.640600568157479679823602193345995e-4L, 3.227716520106894279249709728084626e-3L, 9.619593944683432960546978734646284e-3L, 2.134595382433742403911124458161147e-2L, 3.908166513900489428442993794761185e-2L, 6.266780814639442865832535460550138e-2L, 9.134603732914548552537150753385375e-2L }; #endif int degree = 3; for (; degree <= maxPadeDegree; ++degree) if (normIminusT <= maxNormForPade[degree - 3]) break; return degree; } template inline typename MatrixPowerAtomic::ComplexScalar MatrixPowerAtomic::computeSuperDiag(const ComplexScalar& curr, const ComplexScalar& prev, RealScalar p) { using std::ceil; using std::exp; using std::log; using std::sinh; ComplexScalar logCurr = log(curr); ComplexScalar logPrev = log(prev); int unwindingNumber = ceil((numext::imag(logCurr - logPrev) - 
RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI)); ComplexScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2) + ComplexScalar(0, EIGEN_PI*unwindingNumber); return RealScalar(2) * exp(RealScalar(0.5) * p * (logCurr + logPrev)) * sinh(p * w) / (curr - prev); } template inline typename MatrixPowerAtomic::RealScalar MatrixPowerAtomic::computeSuperDiag(RealScalar curr, RealScalar prev, RealScalar p) { using std::exp; using std::log; using std::sinh; RealScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2); return 2 * exp(p * (log(curr) + log(prev)) / 2) * sinh(p * w) / (curr - prev); } /** * \ingroup MatrixFunctions_Module * * \brief Class for computing matrix powers. * * \tparam MatrixType type of the base, expected to be an instantiation * of the Matrix class template. * * This class is capable of computing real/complex matrices raised to * an arbitrary real power. Meanwhile, it saves the result of Schur * decomposition if an non-integral power has even been calculated. * Therefore, if you want to compute multiple (>= 2) matrix powers * for the same matrix, using the class directly is more efficient than * calling MatrixBase::pow(). * * Example: * \include MatrixPower_optimal.cpp * Output: \verbinclude MatrixPower_optimal.out */ template class MatrixPower : internal::noncopyable { private: typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; public: /** * \brief Constructor. * * \param[in] A the base of the matrix power. * * The class stores a reference to A, so it should not be changed * (or destroyed) before evaluation. */ explicit MatrixPower(const MatrixType& A) : m_A(A), m_conditionNumber(0), m_rank(A.cols()), m_nulls(0) { eigen_assert(A.rows() == A.cols()); } /** * \brief Returns the matrix power. * * \param[in] p exponent, a real scalar. * \return The expression \f$ A^p \f$, where A is specified in the * constructor. */ const MatrixPowerParenthesesReturnValue operator()(RealScalar p) { return MatrixPowerParenthesesReturnValue(*this, p); } /** * \brief Compute the matrix power. * * \param[in] p exponent, a real scalar. * \param[out] res \f$ A^p \f$ where A is specified in the * constructor. */ template void compute(ResultType& res, RealScalar p); Index rows() const { return m_A.rows(); } Index cols() const { return m_A.cols(); } private: typedef std::complex ComplexScalar; typedef Matrix ComplexMatrix; /** \brief Reference to the base of matrix power. */ typename MatrixType::Nested m_A; /** \brief Temporary storage. */ MatrixType m_tmp; /** \brief Store the result of Schur decomposition. */ ComplexMatrix m_T, m_U; /** \brief Store fractional power of m_T. */ ComplexMatrix m_fT; /** * \brief Condition number of m_A. * * It is initialized as 0 to avoid performing unnecessary Schur * decomposition, which is the bottleneck. */ RealScalar m_conditionNumber; /** \brief Rank of m_A. */ Index m_rank; /** \brief Rank deficiency of m_A. */ Index m_nulls; /** * \brief Split p into integral part and fractional part. * * \param[in] p The exponent. * \param[out] p The fractional part ranging in \f$ (-1, 1) \f$. * \param[out] intpart The integral part. * * Only if the fractional part is nonzero, it calls initialize(). */ void split(RealScalar& p, RealScalar& intpart); /** \brief Perform Schur decomposition for fractional power. 
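 *
 * Added illustrative note (the snippet below is an assumption for illustration and not
 * part of the original header): initialize() is only triggered the first time a
 * non-integer exponent is requested, so several fractional powers of the same matrix
 * share one Schur factorization.
 * \code
 * Eigen::MatrixPower<Eigen::MatrixXd> Apow(A);   // A: square, invertible MatrixXd
 * Eigen::MatrixXd B = Apow(0.5);                 // triggers the Schur decomposition
 * Eigen::MatrixXd C = Apow(-1.5);                // reuses the cached factors
 * \endcode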
*/ void initialize(); template void computeIntPower(ResultType& res, RealScalar p); template void computeFracPower(ResultType& res, RealScalar p); template static void revertSchur( Matrix& res, const ComplexMatrix& T, const ComplexMatrix& U); template static void revertSchur( Matrix& res, const ComplexMatrix& T, const ComplexMatrix& U); }; template template void MatrixPower::compute(ResultType& res, RealScalar p) { using std::pow; switch (cols()) { case 0: break; case 1: res(0,0) = pow(m_A.coeff(0,0), p); break; default: RealScalar intpart; split(p, intpart); res = MatrixType::Identity(rows(), cols()); computeIntPower(res, intpart); if (p) computeFracPower(res, p); } } template void MatrixPower::split(RealScalar& p, RealScalar& intpart) { using std::floor; using std::pow; intpart = floor(p); p -= intpart; // Perform Schur decomposition if it is not yet performed and the power is // not an integer. if (!m_conditionNumber && p) initialize(); // Choose the more stable of intpart = floor(p) and intpart = ceil(p). if (p > RealScalar(0.5) && p > (1-p) * pow(m_conditionNumber, p)) { --p; ++intpart; } } template void MatrixPower::initialize() { const ComplexSchur schurOfA(m_A); JacobiRotation rot; ComplexScalar eigenvalue; m_fT.resizeLike(m_A); m_T = schurOfA.matrixT(); m_U = schurOfA.matrixU(); m_conditionNumber = m_T.diagonal().array().abs().maxCoeff() / m_T.diagonal().array().abs().minCoeff(); // Move zero eigenvalues to the bottom right corner. for (Index i = cols()-1; i>=0; --i) { if (m_rank <= 2) return; if (m_T.coeff(i,i) == RealScalar(0)) { for (Index j=i+1; j < m_rank; ++j) { eigenvalue = m_T.coeff(j,j); rot.makeGivens(m_T.coeff(j-1,j), eigenvalue); m_T.applyOnTheRight(j-1, j, rot); m_T.applyOnTheLeft(j-1, j, rot.adjoint()); m_T.coeffRef(j-1,j-1) = eigenvalue; m_T.coeffRef(j,j) = RealScalar(0); m_U.applyOnTheRight(j-1, j, rot); } --m_rank; } } m_nulls = rows() - m_rank; if (m_nulls) { eigen_assert(m_T.bottomRightCorner(m_nulls, m_nulls).isZero() && "Base of matrix power should be invertible or with a semisimple zero eigenvalue."); m_fT.bottomRows(m_nulls).fill(RealScalar(0)); } } template template void MatrixPower::computeIntPower(ResultType& res, RealScalar p) { using std::abs; using std::fmod; RealScalar pp = abs(p); if (p<0) m_tmp = m_A.inverse(); else m_tmp = m_A; while (true) { if (fmod(pp, 2) >= 1) res = m_tmp * res; pp /= 2; if (pp < 1) break; m_tmp *= m_tmp; } } template template void MatrixPower::computeFracPower(ResultType& res, RealScalar p) { Block blockTp(m_fT, 0, 0, m_rank, m_rank); eigen_assert(m_conditionNumber); eigen_assert(m_rank + m_nulls == rows()); MatrixPowerAtomic(m_T.topLeftCorner(m_rank, m_rank), p).compute(blockTp); if (m_nulls) { m_fT.topRightCorner(m_rank, m_nulls) = m_T.topLeftCorner(m_rank, m_rank).template triangularView() .solve(blockTp * m_T.topRightCorner(m_rank, m_nulls)); } revertSchur(m_tmp, m_fT, m_U); res = m_tmp * res; } template template inline void MatrixPower::revertSchur( Matrix& res, const ComplexMatrix& T, const ComplexMatrix& U) { res.noalias() = U * (T.template triangularView() * U.adjoint()); } template template inline void MatrixPower::revertSchur( Matrix& res, const ComplexMatrix& T, const ComplexMatrix& U) { res.noalias() = (U * (T.template triangularView() * U.adjoint())).real(); } /** * \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix power of some matrix (expression). * * \tparam Derived type of the base, a matrix (expression). 
* * This class holds the arguments to the matrix power until it is * assigned or evaluated for some other reason (so the argument * should not be changed in the meantime). It is the return type of * MatrixBase::pow() and related functions and most of the * time this is the only way it is used. */ template class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue > { public: typedef typename Derived::PlainObject PlainObject; typedef typename Derived::RealScalar RealScalar; typedef typename Derived::Index Index; /** * \brief Constructor. * * \param[in] A %Matrix (expression), the base of the matrix power. * \param[in] p real scalar, the exponent of the matrix power. */ MatrixPowerReturnValue(const Derived& A, RealScalar p) : m_A(A), m_p(p) { } /** * \brief Compute the matrix power. * * \param[out] result \f$ A^p \f$ where \p A and \p p are as in the * constructor. */ template inline void evalTo(ResultType& res) const { MatrixPower(m_A.eval()).compute(res, m_p); } Index rows() const { return m_A.rows(); } Index cols() const { return m_A.cols(); } private: const Derived& m_A; const RealScalar m_p; }; /** * \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix power of some matrix (expression). * * \tparam Derived type of the base, a matrix (expression). * * This class holds the arguments to the matrix power until it is * assigned or evaluated for some other reason (so the argument * should not be changed in the meantime). It is the return type of * MatrixBase::pow() and related functions and most of the * time this is the only way it is used. */ template class MatrixComplexPowerReturnValue : public ReturnByValue< MatrixComplexPowerReturnValue > { public: typedef typename Derived::PlainObject PlainObject; typedef typename std::complex ComplexScalar; typedef typename Derived::Index Index; /** * \brief Constructor. * * \param[in] A %Matrix (expression), the base of the matrix power. * \param[in] p complex scalar, the exponent of the matrix power. */ MatrixComplexPowerReturnValue(const Derived& A, const ComplexScalar& p) : m_A(A), m_p(p) { } /** * \brief Compute the matrix power. * * Because \p p is complex, \f$ A^p \f$ is simply evaluated as \f$ * \exp(p \log(A)) \f$. * * \param[out] result \f$ A^p \f$ where \p A and \p p are as in the * constructor. */ template inline void evalTo(ResultType& res) const { res = (m_p * m_A.log()).exp(); } Index rows() const { return m_A.rows(); } Index cols() const { return m_A.cols(); } private: const Derived& m_A; const ComplexScalar m_p; }; namespace internal { template struct traits< MatrixPowerParenthesesReturnValue > { typedef typename MatrixPowerType::PlainObject ReturnType; }; template struct traits< MatrixPowerReturnValue > { typedef typename Derived::PlainObject ReturnType; }; template struct traits< MatrixComplexPowerReturnValue > { typedef typename Derived::PlainObject ReturnType; }; } template const MatrixPowerReturnValue MatrixBase::pow(const RealScalar& p) const { return MatrixPowerReturnValue(derived(), p); } template const MatrixComplexPowerReturnValue MatrixBase::pow(const std::complex& p) const { return MatrixComplexPowerReturnValue(derived(), p); } } // namespace Eigen #endif // EIGEN_MATRIX_POWER RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h0000644000176200001440000005532313235710577027236 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2009-2011, 2013 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_FUNCTION #define EIGEN_MATRIX_FUNCTION #include "StemFunction.h" namespace Eigen { namespace internal { /** \brief Maximum distance allowed between eigenvalues to be considered "close". */ static const float matrix_function_separation = 0.1f; /** \ingroup MatrixFunctions_Module * \class MatrixFunctionAtomic * \brief Helper class for computing matrix functions of atomic matrices. * * Here, an atomic matrix is a triangular matrix whose diagonal entries are close to each other. */ template class MatrixFunctionAtomic { public: typedef typename MatrixType::Scalar Scalar; typedef typename stem_function::type StemFunction; /** \brief Constructor * \param[in] f matrix function to compute. */ MatrixFunctionAtomic(StemFunction f) : m_f(f) { } /** \brief Compute matrix function of atomic matrix * \param[in] A argument of matrix function, should be upper triangular and atomic * \returns f(A), the matrix function evaluated at the given matrix */ MatrixType compute(const MatrixType& A); private: StemFunction* m_f; }; template typename NumTraits::Real matrix_function_compute_mu(const MatrixType& A) { typedef typename plain_col_type::type VectorType; typename MatrixType::Index rows = A.rows(); const MatrixType N = MatrixType::Identity(rows, rows) - A; VectorType e = VectorType::Ones(rows); N.template triangularView().solveInPlace(e); return e.cwiseAbs().maxCoeff(); } template MatrixType MatrixFunctionAtomic::compute(const MatrixType& A) { // TODO: Use that A is upper triangular typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; Index rows = A.rows(); Scalar avgEival = A.trace() / Scalar(RealScalar(rows)); MatrixType Ashifted = A - avgEival * MatrixType::Identity(rows, rows); RealScalar mu = matrix_function_compute_mu(Ashifted); MatrixType F = m_f(avgEival, 0) * MatrixType::Identity(rows, rows); MatrixType P = Ashifted; MatrixType Fincr; for (Index s = 1; s < 1.1 * rows + 10; s++) { // upper limit is fairly arbitrary Fincr = m_f(avgEival, static_cast(s)) * P; F += Fincr; P = Scalar(RealScalar(1.0/(s + 1))) * P * Ashifted; // test whether Taylor series converged const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff(); const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff(); if (Fincr_norm < NumTraits::epsilon() * F_norm) { RealScalar delta = 0; RealScalar rfactorial = 1; for (Index r = 0; r < rows; r++) { RealScalar mx = 0; for (Index i = 0; i < rows; i++) mx = (std::max)(mx, std::abs(m_f(Ashifted(i, i) + avgEival, static_cast(s+r)))); if (r != 0) rfactorial *= RealScalar(r); delta = (std::max)(delta, mx / rfactorial); } const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff(); if (mu * delta * P_norm < NumTraits::epsilon() * F_norm) // series converged break; } } return F; } /** \brief Find cluster in \p clusters containing some value * \param[in] key Value to find * \returns Iterator to cluster containing \p key, or \c clusters.end() if no cluster in \p m_clusters * contains \p key. 
*/ template typename ListOfClusters::iterator matrix_function_find_cluster(Index key, ListOfClusters& clusters) { typename std::list::iterator j; for (typename ListOfClusters::iterator i = clusters.begin(); i != clusters.end(); ++i) { j = std::find(i->begin(), i->end(), key); if (j != i->end()) return i; } return clusters.end(); } /** \brief Partition eigenvalues in clusters of ei'vals close to each other * * \param[in] eivals Eigenvalues * \param[out] clusters Resulting partition of eigenvalues * * The partition satisfies the following two properties: * # Any eigenvalue in a certain cluster is at most matrix_function_separation() away from another eigenvalue * in the same cluster. * # The distance between two eigenvalues in different clusters is more than matrix_function_separation(). * The implementation follows Algorithm 4.1 in the paper of Davies and Higham. */ template void matrix_function_partition_eigenvalues(const EivalsType& eivals, std::list& clusters) { typedef typename EivalsType::Index Index; typedef typename EivalsType::RealScalar RealScalar; for (Index i=0; i::iterator qi = matrix_function_find_cluster(i, clusters); if (qi == clusters.end()) { Cluster l; l.push_back(i); clusters.push_back(l); qi = clusters.end(); --qi; } // Look for other element to add to the set for (Index j=i+1; jbegin(), qi->end(), j) == qi->end()) { typename std::list::iterator qj = matrix_function_find_cluster(j, clusters); if (qj == clusters.end()) { qi->push_back(j); } else { qi->insert(qi->end(), qj->begin(), qj->end()); clusters.erase(qj); } } } } } /** \brief Compute size of each cluster given a partitioning */ template void matrix_function_compute_cluster_size(const ListOfClusters& clusters, Matrix& clusterSize) { const Index numClusters = static_cast(clusters.size()); clusterSize.setZero(numClusters); Index clusterIndex = 0; for (typename ListOfClusters::const_iterator cluster = clusters.begin(); cluster != clusters.end(); ++cluster) { clusterSize[clusterIndex] = cluster->size(); ++clusterIndex; } } /** \brief Compute start of each block using clusterSize */ template void matrix_function_compute_block_start(const VectorType& clusterSize, VectorType& blockStart) { blockStart.resize(clusterSize.rows()); blockStart(0) = 0; for (typename VectorType::Index i = 1; i < clusterSize.rows(); i++) { blockStart(i) = blockStart(i-1) + clusterSize(i-1); } } /** \brief Compute mapping of eigenvalue indices to cluster indices */ template void matrix_function_compute_map(const EivalsType& eivals, const ListOfClusters& clusters, VectorType& eivalToCluster) { typedef typename EivalsType::Index Index; eivalToCluster.resize(eivals.rows()); Index clusterIndex = 0; for (typename ListOfClusters::const_iterator cluster = clusters.begin(); cluster != clusters.end(); ++cluster) { for (Index i = 0; i < eivals.rows(); ++i) { if (std::find(cluster->begin(), cluster->end(), i) != cluster->end()) { eivalToCluster[i] = clusterIndex; } } ++clusterIndex; } } /** \brief Compute permutation which groups ei'vals in same cluster together */ template void matrix_function_compute_permutation(const DynVectorType& blockStart, const DynVectorType& eivalToCluster, VectorType& permutation) { typedef typename VectorType::Index Index; DynVectorType indexNextEntry = blockStart; permutation.resize(eivalToCluster.rows()); for (Index i = 0; i < eivalToCluster.rows(); i++) { Index cluster = eivalToCluster[i]; permutation[i] = indexNextEntry[cluster]; ++indexNextEntry[cluster]; } } /** \brief Permute Schur decomposition in U and T according to 
permutation */ template void matrix_function_permute_schur(VectorType& permutation, MatrixType& U, MatrixType& T) { typedef typename VectorType::Index Index; for (Index i = 0; i < permutation.rows() - 1; i++) { Index j; for (j = i; j < permutation.rows(); j++) { if (permutation(j) == i) break; } eigen_assert(permutation(j) == i); for (Index k = j-1; k >= i; k--) { JacobiRotation rotation; rotation.makeGivens(T(k, k+1), T(k+1, k+1) - T(k, k)); T.applyOnTheLeft(k, k+1, rotation.adjoint()); T.applyOnTheRight(k, k+1, rotation); U.applyOnTheRight(k, k+1, rotation); std::swap(permutation.coeffRef(k), permutation.coeffRef(k+1)); } } } /** \brief Compute block diagonal part of matrix function. * * This routine computes the matrix function applied to the block diagonal part of \p T (which should be * upper triangular), with the blocking given by \p blockStart and \p clusterSize. The matrix function of * each diagonal block is computed by \p atomic. The off-diagonal parts of \p fT are set to zero. */ template void matrix_function_compute_block_atomic(const MatrixType& T, AtomicType& atomic, const VectorType& blockStart, const VectorType& clusterSize, MatrixType& fT) { fT.setZero(T.rows(), T.cols()); for (typename VectorType::Index i = 0; i < clusterSize.rows(); ++i) { fT.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i)) = atomic.compute(T.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i))); } } /** \brief Solve a triangular Sylvester equation AX + XB = C * * \param[in] A the matrix A; should be square and upper triangular * \param[in] B the matrix B; should be square and upper triangular * \param[in] C the matrix C; should have correct size. * * \returns the solution X. * * If A is m-by-m and B is n-by-n, then both C and X are m-by-n. The (i,j)-th component of the Sylvester * equation is * \f[ * \sum_{k=i}^m A_{ik} X_{kj} + \sum_{k=1}^j X_{ik} B_{kj} = C_{ij}. * \f] * This can be re-arranged to yield: * \f[ * X_{ij} = \frac{1}{A_{ii} + B_{jj}} \Bigl( C_{ij} * - \sum_{k=i+1}^m A_{ik} X_{kj} - \sum_{k=1}^{j-1} X_{ik} B_{kj} \Bigr). * \f] * It is assumed that A and B are such that the numerator is never zero (otherwise the Sylvester equation * does not have a unique solution). In that case, these equations can be evaluated in the order * \f$ i=m,\ldots,1 \f$ and \f$ j=1,\ldots,n \f$. */ template MatrixType matrix_function_solve_triangular_sylvester(const MatrixType& A, const MatrixType& B, const MatrixType& C) { eigen_assert(A.rows() == A.cols()); eigen_assert(A.isUpperTriangular()); eigen_assert(B.rows() == B.cols()); eigen_assert(B.isUpperTriangular()); eigen_assert(C.rows() == A.rows()); eigen_assert(C.cols() == B.rows()); typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Index m = A.rows(); Index n = B.rows(); MatrixType X(m, n); for (Index i = m - 1; i >= 0; --i) { for (Index j = 0; j < n; ++j) { // Compute AX = \sum_{k=i+1}^m A_{ik} X_{kj} Scalar AX; if (i == m - 1) { AX = 0; } else { Matrix AXmatrix = A.row(i).tail(m-1-i) * X.col(j).tail(m-1-i); AX = AXmatrix(0,0); } // Compute XB = \sum_{k=1}^{j-1} X_{ik} B_{kj} Scalar XB; if (j == 0) { XB = 0; } else { Matrix XBmatrix = X.row(i).head(j) * B.col(j).head(j); XB = XBmatrix(0,0); } X(i,j) = (C(i,j) - AX - XB) / (A(i,i) + B(j,j)); } } return X; } /** \brief Compute part of matrix function above block diagonal. * * This routine completes the computation of \p fT, denoting a matrix function applied to the triangular * matrix \p T. 
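 *
 * Added detail (comment not present in the original source): each block \f$ F_{i,i+k} \f$
 * above the diagonal is obtained by solving the triangular Sylvester equation
 * \f[ T_{ii} X - X T_{i+k,i+k}
 *       = F_{ii} T_{i,i+k} - T_{i,i+k} F_{i+k,i+k}
 *       + \sum_{m=i+1}^{i+k-1} \bigl( F_{im} T_{m,i+k} - T_{im} F_{m,i+k} \bigr), \f]
 * which is exactly what the blocks A, B and C assembled in the loop below encode.
 *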
It assumes that the block diagonal part of \p fT has already been computed. The part below * the diagonal is zero, because \p T is upper triangular. */ template void matrix_function_compute_above_diagonal(const MatrixType& T, const VectorType& blockStart, const VectorType& clusterSize, MatrixType& fT) { typedef internal::traits Traits; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; static const int RowsAtCompileTime = Traits::RowsAtCompileTime; static const int ColsAtCompileTime = Traits::ColsAtCompileTime; static const int Options = MatrixType::Options; typedef Matrix DynMatrixType; for (Index k = 1; k < clusterSize.rows(); k++) { for (Index i = 0; i < clusterSize.rows() - k; i++) { // compute (i, i+k) block DynMatrixType A = T.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i)); DynMatrixType B = -T.block(blockStart(i+k), blockStart(i+k), clusterSize(i+k), clusterSize(i+k)); DynMatrixType C = fT.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i)) * T.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k)); C -= T.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k)) * fT.block(blockStart(i+k), blockStart(i+k), clusterSize(i+k), clusterSize(i+k)); for (Index m = i + 1; m < i + k; m++) { C += fT.block(blockStart(i), blockStart(m), clusterSize(i), clusterSize(m)) * T.block(blockStart(m), blockStart(i+k), clusterSize(m), clusterSize(i+k)); C -= T.block(blockStart(i), blockStart(m), clusterSize(i), clusterSize(m)) * fT.block(blockStart(m), blockStart(i+k), clusterSize(m), clusterSize(i+k)); } fT.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k)) = matrix_function_solve_triangular_sylvester(A, B, C); } } } /** \ingroup MatrixFunctions_Module * \brief Class for computing matrix functions. * \tparam MatrixType type of the argument of the matrix function, * expected to be an instantiation of the Matrix class template. * \tparam AtomicType type for computing matrix function of atomic blocks. * \tparam IsComplex used internally to select correct specialization. * * This class implements the Schur-Parlett algorithm for computing matrix functions. The spectrum of the * matrix is divided in clustered of eigenvalues that lies close together. This class delegates the * computation of the matrix function on every block corresponding to these clusters to an object of type * \p AtomicType and uses these results to compute the matrix function of the whole matrix. The class * \p AtomicType should have a \p compute() member function for computing the matrix function of a block. * * \sa class MatrixFunctionAtomic, class MatrixLogarithmAtomic */ template ::Scalar>::IsComplex> struct matrix_function_compute { /** \brief Compute the matrix function. * * \param[in] A argument of matrix function, should be a square matrix. * \param[in] atomic class for computing matrix function of atomic blocks. * \param[out] result the function \p f applied to \p A, as * specified in the constructor. * * See MatrixBase::matrixFunction() for details on how this computation * is implemented. */ template static void run(const MatrixType& A, AtomicType& atomic, ResultType &result); }; /** \internal \ingroup MatrixFunctions_Module * \brief Partial specialization of MatrixFunction for real matrices * * This converts the real matrix to a complex matrix, compute the matrix function of that matrix, and then * converts the result back to a real matrix. 
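 *
 * Illustrative sketch (added comment; the function name expfn is an assumption used only
 * for illustration): this real-to-complex round trip is what allows a user-supplied stem
 * function to be applied to a real matrix through MatrixBase::matrixFunction().
 * \code
 * std::complex<double> expfn(std::complex<double> x, int)  // n-th derivative of exp is exp
 * {
 *   return std::exp(x);
 * }
 * Eigen::MatrixXd expA = A.matrixFunction(expfn);          // A: square MatrixXd
 * \endcode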
*/ template struct matrix_function_compute { template static void run(const MatA& A, AtomicType& atomic, ResultType &result) { typedef internal::traits Traits; typedef typename Traits::Scalar Scalar; static const int Rows = Traits::RowsAtCompileTime, Cols = Traits::ColsAtCompileTime; static const int MaxRows = Traits::MaxRowsAtCompileTime, MaxCols = Traits::MaxColsAtCompileTime; typedef std::complex ComplexScalar; typedef Matrix ComplexMatrix; ComplexMatrix CA = A.template cast(); ComplexMatrix Cresult; matrix_function_compute::run(CA, atomic, Cresult); result = Cresult.real(); } }; /** \internal \ingroup MatrixFunctions_Module * \brief Partial specialization of MatrixFunction for complex matrices */ template struct matrix_function_compute { template static void run(const MatA& A, AtomicType& atomic, ResultType &result) { typedef internal::traits Traits; // compute Schur decomposition of A const ComplexSchur schurOfA(A); MatrixType T = schurOfA.matrixT(); MatrixType U = schurOfA.matrixU(); // partition eigenvalues into clusters of ei'vals "close" to each other std::list > clusters; matrix_function_partition_eigenvalues(T.diagonal(), clusters); // compute size of each cluster Matrix clusterSize; matrix_function_compute_cluster_size(clusters, clusterSize); // blockStart[i] is row index at which block corresponding to i-th cluster starts Matrix blockStart; matrix_function_compute_block_start(clusterSize, blockStart); // compute map so that eivalToCluster[i] = j means that i-th ei'val is in j-th cluster Matrix eivalToCluster; matrix_function_compute_map(T.diagonal(), clusters, eivalToCluster); // compute permutation which groups ei'vals in same cluster together Matrix permutation; matrix_function_compute_permutation(blockStart, eivalToCluster, permutation); // permute Schur decomposition matrix_function_permute_schur(permutation, U, T); // compute result MatrixType fT; // matrix function applied to T matrix_function_compute_block_atomic(T, atomic, blockStart, clusterSize, fT); matrix_function_compute_above_diagonal(T, blockStart, clusterSize, fT); result = U * (fT.template triangularView() * U.adjoint()); } }; } // end of namespace internal /** \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix function of some matrix (expression). * * \tparam Derived Type of the argument to the matrix function. * * This class holds the argument to the matrix function until it is assigned or evaluated for some other * reason (so the argument should not be changed in the meantime). It is the return type of * matrixBase::matrixFunction() and related functions and most of the time this is the only way it is used. */ template class MatrixFunctionReturnValue : public ReturnByValue > { public: typedef typename Derived::Scalar Scalar; typedef typename Derived::Index Index; typedef typename internal::stem_function::type StemFunction; protected: typedef typename internal::ref_selector::type DerivedNested; public: /** \brief Constructor. * * \param[in] A %Matrix (expression) forming the argument of the matrix function. * \param[in] f Stem function for matrix function under consideration. */ MatrixFunctionReturnValue(const Derived& A, StemFunction f) : m_A(A), m_f(f) { } /** \brief Compute the matrix function. * * \param[out] result \p f applied to \p A, where \p f and \p A are as in the constructor. 
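 *
 * Added illustrative note (not part of the original header): the convenience wrappers
 * defined at the end of this file return this proxy type, so expressions such as the
 * following are evaluated lazily through evalTo() on assignment.
 * \code
 * Eigen::MatrixXd S = A.sin();   // A: square MatrixXd
 * Eigen::MatrixXd C = A.cos();
 * // S*S + C*C equals the identity matrix up to rounding error
 * \endcode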
*/ template inline void evalTo(ResultType& result) const { typedef typename internal::nested_eval::type NestedEvalType; typedef typename internal::remove_all::type NestedEvalTypeClean; typedef internal::traits Traits; static const int RowsAtCompileTime = Traits::RowsAtCompileTime; static const int ColsAtCompileTime = Traits::ColsAtCompileTime; typedef std::complex::Real> ComplexScalar; typedef Matrix DynMatrixType; typedef internal::MatrixFunctionAtomic AtomicType; AtomicType atomic(m_f); internal::matrix_function_compute::run(m_A, atomic, result); } Index rows() const { return m_A.rows(); } Index cols() const { return m_A.cols(); } private: const DerivedNested m_A; StemFunction *m_f; }; namespace internal { template struct traits > { typedef typename Derived::PlainObject ReturnType; }; } /********** MatrixBase methods **********/ template const MatrixFunctionReturnValue MatrixBase::matrixFunction(typename internal::stem_function::Scalar>::type f) const { eigen_assert(rows() == cols()); return MatrixFunctionReturnValue(derived(), f); } template const MatrixFunctionReturnValue MatrixBase::sin() const { eigen_assert(rows() == cols()); typedef typename internal::stem_function::ComplexScalar ComplexScalar; return MatrixFunctionReturnValue(derived(), internal::stem_function_sin); } template const MatrixFunctionReturnValue MatrixBase::cos() const { eigen_assert(rows() == cols()); typedef typename internal::stem_function::ComplexScalar ComplexScalar; return MatrixFunctionReturnValue(derived(), internal::stem_function_cos); } template const MatrixFunctionReturnValue MatrixBase::sinh() const { eigen_assert(rows() == cols()); typedef typename internal::stem_function::ComplexScalar ComplexScalar; return MatrixFunctionReturnValue(derived(), internal::stem_function_sinh); } template const MatrixFunctionReturnValue MatrixBase::cosh() const { eigen_assert(rows() == cols()); typedef typename internal::stem_function::ComplexScalar ComplexScalar; return MatrixFunctionReturnValue(derived(), internal::stem_function_cosh); } } // end namespace Eigen #endif // EIGEN_MATRIX_FUNCTION RcppEigen/inst/include/unsupported/Eigen/src/AutoDiff/0000755000176200001440000000000013235710577022447 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h0000755000176200001440000006675113235710577025471 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_AUTODIFF_SCALAR_H #define EIGEN_AUTODIFF_SCALAR_H namespace Eigen { namespace internal { template struct make_coherent_impl { static void run(A&, B&) {} }; // resize a to match b is a.size()==0, and conversely. template void make_coherent(const A& a, const B&b) { make_coherent_impl::run(a.const_cast_derived(), b.const_cast_derived()); } template struct auto_diff_special_op; } // end namespace internal template class AutoDiffScalar; template inline AutoDiffScalar MakeAutoDiffScalar(const typename NewDerType::Scalar& value, const NewDerType &der) { return AutoDiffScalar(value,der); } /** \class AutoDiffScalar * \brief A scalar type replacement with automatic differentation capability * * \param _DerType the vector type used to store/represent the derivatives. 
The base scalar type * as well as the number of derivatives to compute are determined from this type. * Typical choices include, e.g., \c Vector4f for 4 derivatives, or \c VectorXf * if the number of derivatives is not known at compile time, and/or, the number * of derivatives is large. * Note that _DerType can also be a reference (e.g., \c VectorXf&) to wrap a * existing vector into an AutoDiffScalar. * Finally, _DerType can also be any Eigen compatible expression. * * This class represents a scalar value while tracking its respective derivatives using Eigen's expression * template mechanism. * * It supports the following list of global math function: * - std::abs, std::sqrt, std::pow, std::exp, std::log, std::sin, std::cos, * - internal::abs, internal::sqrt, numext::pow, internal::exp, internal::log, internal::sin, internal::cos, * - internal::conj, internal::real, internal::imag, numext::abs2. * * AutoDiffScalar can be used as the scalar type of an Eigen::Matrix object. However, * in that case, the expression template mechanism only occurs at the top Matrix level, * while derivatives are computed right away. * */ template class AutoDiffScalar : public internal::auto_diff_special_op <_DerType, !internal::is_same::type>::Scalar, typename NumTraits::type>::Scalar>::Real>::value> { public: typedef internal::auto_diff_special_op <_DerType, !internal::is_same::type>::Scalar, typename NumTraits::type>::Scalar>::Real>::value> Base; typedef typename internal::remove_all<_DerType>::type DerType; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real Real; using Base::operator+; using Base::operator*; /** Default constructor without any initialization. */ AutoDiffScalar() {} /** Constructs an active scalar from its \a value, and initializes the \a nbDer derivatives such that it corresponds to the \a derNumber -th variable */ AutoDiffScalar(const Scalar& value, int nbDer, int derNumber) : m_value(value), m_derivatives(DerType::Zero(nbDer)) { m_derivatives.coeffRef(derNumber) = Scalar(1); } /** Conversion from a scalar constant to an active scalar. * The derivatives are set to zero. 
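 *
 * Added usage sketch (comment not present in the original header; the typedef and values
 * are chosen only for illustration). In contrast to this zero-derivative conversion, the
 * (value, nbDer, derNumber) constructor documented above seeds an independent variable:
 * \code
 * typedef Eigen::AutoDiffScalar<Eigen::Matrix<double, 1, 1> > ADouble;
 * ADouble x(2.0, 1, 0);            // value 2.0, one derivative slot, variable #0
 * ADouble y = x * sin(x);          // sin() is found via ADL in namespace Eigen
 * double v  = y.value();           // 2*sin(2)
 * double dv = y.derivatives()(0);  // sin(2) + 2*cos(2)
 * \endcode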
*/ /*explicit*/ AutoDiffScalar(const Real& value) : m_value(value) { if(m_derivatives.size()>0) m_derivatives.setZero(); } /** Constructs an active scalar from its \a value and derivatives \a der */ AutoDiffScalar(const Scalar& value, const DerType& der) : m_value(value), m_derivatives(der) {} template AutoDiffScalar(const AutoDiffScalar& other #ifndef EIGEN_PARSED_BY_DOXYGEN , typename internal::enable_if< internal::is_same::type>::Scalar>::value && internal::is_convertible::value , void*>::type = 0 #endif ) : m_value(other.value()), m_derivatives(other.derivatives()) {} friend std::ostream & operator << (std::ostream & s, const AutoDiffScalar& a) { return s << a.value(); } AutoDiffScalar(const AutoDiffScalar& other) : m_value(other.value()), m_derivatives(other.derivatives()) {} template inline AutoDiffScalar& operator=(const AutoDiffScalar& other) { m_value = other.value(); m_derivatives = other.derivatives(); return *this; } inline AutoDiffScalar& operator=(const AutoDiffScalar& other) { m_value = other.value(); m_derivatives = other.derivatives(); return *this; } inline AutoDiffScalar& operator=(const Scalar& other) { m_value = other; if(m_derivatives.size()>0) m_derivatives.setZero(); return *this; } // inline operator const Scalar& () const { return m_value; } // inline operator Scalar& () { return m_value; } inline const Scalar& value() const { return m_value; } inline Scalar& value() { return m_value; } inline const DerType& derivatives() const { return m_derivatives; } inline DerType& derivatives() { return m_derivatives; } inline bool operator< (const Scalar& other) const { return m_value < other; } inline bool operator<=(const Scalar& other) const { return m_value <= other; } inline bool operator> (const Scalar& other) const { return m_value > other; } inline bool operator>=(const Scalar& other) const { return m_value >= other; } inline bool operator==(const Scalar& other) const { return m_value == other; } inline bool operator!=(const Scalar& other) const { return m_value != other; } friend inline bool operator< (const Scalar& a, const AutoDiffScalar& b) { return a < b.value(); } friend inline bool operator<=(const Scalar& a, const AutoDiffScalar& b) { return a <= b.value(); } friend inline bool operator> (const Scalar& a, const AutoDiffScalar& b) { return a > b.value(); } friend inline bool operator>=(const Scalar& a, const AutoDiffScalar& b) { return a >= b.value(); } friend inline bool operator==(const Scalar& a, const AutoDiffScalar& b) { return a == b.value(); } friend inline bool operator!=(const Scalar& a, const AutoDiffScalar& b) { return a != b.value(); } template inline bool operator< (const AutoDiffScalar& b) const { return m_value < b.value(); } template inline bool operator<=(const AutoDiffScalar& b) const { return m_value <= b.value(); } template inline bool operator> (const AutoDiffScalar& b) const { return m_value > b.value(); } template inline bool operator>=(const AutoDiffScalar& b) const { return m_value >= b.value(); } template inline bool operator==(const AutoDiffScalar& b) const { return m_value == b.value(); } template inline bool operator!=(const AutoDiffScalar& b) const { return m_value != b.value(); } inline const AutoDiffScalar operator+(const Scalar& other) const { return AutoDiffScalar(m_value + other, m_derivatives); } friend inline const AutoDiffScalar operator+(const Scalar& a, const AutoDiffScalar& b) { return AutoDiffScalar(a + b.value(), b.derivatives()); } // inline const AutoDiffScalar operator+(const Real& other) const // { // return 
AutoDiffScalar(m_value + other, m_derivatives); // } // friend inline const AutoDiffScalar operator+(const Real& a, const AutoDiffScalar& b) // { // return AutoDiffScalar(a + b.value(), b.derivatives()); // } inline AutoDiffScalar& operator+=(const Scalar& other) { value() += other; return *this; } template inline const AutoDiffScalar,const DerType,const typename internal::remove_all::type> > operator+(const AutoDiffScalar& other) const { internal::make_coherent(m_derivatives, other.derivatives()); return AutoDiffScalar,const DerType,const typename internal::remove_all::type> >( m_value + other.value(), m_derivatives + other.derivatives()); } template inline AutoDiffScalar& operator+=(const AutoDiffScalar& other) { (*this) = (*this) + other; return *this; } inline const AutoDiffScalar operator-(const Scalar& b) const { return AutoDiffScalar(m_value - b, m_derivatives); } friend inline const AutoDiffScalar, const DerType> > operator-(const Scalar& a, const AutoDiffScalar& b) { return AutoDiffScalar, const DerType> > (a - b.value(), -b.derivatives()); } inline AutoDiffScalar& operator-=(const Scalar& other) { value() -= other; return *this; } template inline const AutoDiffScalar, const DerType,const typename internal::remove_all::type> > operator-(const AutoDiffScalar& other) const { internal::make_coherent(m_derivatives, other.derivatives()); return AutoDiffScalar, const DerType,const typename internal::remove_all::type> >( m_value - other.value(), m_derivatives - other.derivatives()); } template inline AutoDiffScalar& operator-=(const AutoDiffScalar& other) { *this = *this - other; return *this; } inline const AutoDiffScalar, const DerType> > operator-() const { return AutoDiffScalar, const DerType> >( -m_value, -m_derivatives); } inline const AutoDiffScalar operator*(const Scalar& other) const { return MakeAutoDiffScalar(m_value * other, m_derivatives * other); } friend inline const AutoDiffScalar operator*(const Scalar& other, const AutoDiffScalar& a) { return MakeAutoDiffScalar(a.value() * other, a.derivatives() * other); } // inline const AutoDiffScalar, DerType>::Type > // operator*(const Real& other) const // { // return AutoDiffScalar, DerType>::Type >( // m_value * other, // (m_derivatives * other)); // } // // friend inline const AutoDiffScalar, DerType>::Type > // operator*(const Real& other, const AutoDiffScalar& a) // { // return AutoDiffScalar, DerType>::Type >( // a.value() * other, // a.derivatives() * other); // } inline const AutoDiffScalar operator/(const Scalar& other) const { return MakeAutoDiffScalar(m_value / other, (m_derivatives * (Scalar(1)/other))); } friend inline const AutoDiffScalar operator/(const Scalar& other, const AutoDiffScalar& a) { return MakeAutoDiffScalar(other / a.value(), a.derivatives() * (Scalar(-other) / (a.value()*a.value()))); } // inline const AutoDiffScalar, DerType>::Type > // operator/(const Real& other) const // { // return AutoDiffScalar, DerType>::Type >( // m_value / other, // (m_derivatives * (Real(1)/other))); // } // // friend inline const AutoDiffScalar, DerType>::Type > // operator/(const Real& other, const AutoDiffScalar& a) // { // return AutoDiffScalar, DerType>::Type >( // other / a.value(), // a.derivatives() * (-Real(1)/other)); // } template inline const AutoDiffScalar EIGEN_COMMA const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) EIGEN_COMMA const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename internal::remove_all::type,Scalar,product) >,Scalar,product) > operator/(const AutoDiffScalar& other) const { 
internal::make_coherent(m_derivatives, other.derivatives()); return MakeAutoDiffScalar( m_value / other.value(), ((m_derivatives * other.value()) - (other.derivatives() * m_value)) * (Scalar(1)/(other.value()*other.value()))); } template inline const AutoDiffScalar, const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product), const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename internal::remove_all::type,Scalar,product) > > operator*(const AutoDiffScalar& other) const { internal::make_coherent(m_derivatives, other.derivatives()); return MakeAutoDiffScalar( m_value * other.value(), (m_derivatives * other.value()) + (other.derivatives() * m_value)); } inline AutoDiffScalar& operator*=(const Scalar& other) { *this = *this * other; return *this; } template inline AutoDiffScalar& operator*=(const AutoDiffScalar& other) { *this = *this * other; return *this; } inline AutoDiffScalar& operator/=(const Scalar& other) { *this = *this / other; return *this; } template inline AutoDiffScalar& operator/=(const AutoDiffScalar& other) { *this = *this / other; return *this; } protected: Scalar m_value; DerType m_derivatives; }; namespace internal { template struct auto_diff_special_op<_DerType, true> // : auto_diff_scalar_op<_DerType, typename NumTraits::Real, // is_same::Real>::value> { typedef typename remove_all<_DerType>::type DerType; typedef typename traits::Scalar Scalar; typedef typename NumTraits::Real Real; // typedef auto_diff_scalar_op<_DerType, typename NumTraits::Real, // is_same::Real>::value> Base; // using Base::operator+; // using Base::operator+=; // using Base::operator-; // using Base::operator-=; // using Base::operator*; // using Base::operator*=; const AutoDiffScalar<_DerType>& derived() const { return *static_cast*>(this); } AutoDiffScalar<_DerType>& derived() { return *static_cast*>(this); } inline const AutoDiffScalar operator+(const Real& other) const { return AutoDiffScalar(derived().value() + other, derived().derivatives()); } friend inline const AutoDiffScalar operator+(const Real& a, const AutoDiffScalar<_DerType>& b) { return AutoDiffScalar(a + b.value(), b.derivatives()); } inline AutoDiffScalar<_DerType>& operator+=(const Real& other) { derived().value() += other; return derived(); } inline const AutoDiffScalar >, DerType>::Type > operator*(const Real& other) const { return AutoDiffScalar >, DerType>::Type >( derived().value() * other, derived().derivatives() * other); } friend inline const AutoDiffScalar >, DerType>::Type > operator*(const Real& other, const AutoDiffScalar<_DerType>& a) { return AutoDiffScalar >, DerType>::Type >( a.value() * other, a.derivatives() * other); } inline AutoDiffScalar<_DerType>& operator*=(const Scalar& other) { *this = *this * other; return derived(); } }; template struct auto_diff_special_op<_DerType, false> { void operator*() const; void operator-() const; void operator+() const; }; template struct make_coherent_impl, B> { typedef Matrix A; static void run(A& a, B& b) { if((A_Rows==Dynamic || A_Cols==Dynamic) && (a.size()==0)) { a.resize(b.size()); a.setZero(); } } }; template struct make_coherent_impl > { typedef Matrix B; static void run(A& a, B& b) { if((B_Rows==Dynamic || B_Cols==Dynamic) && (b.size()==0)) { b.resize(a.size()); b.setZero(); } } }; template struct make_coherent_impl, Matrix > { typedef Matrix A; typedef Matrix B; static void run(A& a, B& b) { if((A_Rows==Dynamic || A_Cols==Dynamic) && (a.size()==0)) { a.resize(b.size()); a.setZero(); } else if((B_Rows==Dynamic || B_Cols==Dynamic) && (b.size()==0)) { 
b.resize(a.size()); b.setZero(); } } }; } // end namespace internal template struct ScalarBinaryOpTraits,typename DerType::Scalar,BinOp> { typedef AutoDiffScalar ReturnType; }; template struct ScalarBinaryOpTraits, BinOp> { typedef AutoDiffScalar ReturnType; }; // The following is an attempt to let Eigen's known about expression template, but that's more tricky! // template // struct ScalarBinaryOpTraits,AutoDiffScalar, BinOp> // { // enum { Defined = 1 }; // typedef AutoDiffScalar ReturnType; // }; // // template // struct ScalarBinaryOpTraits,AutoDiffScalar, BinOp> // { // enum { Defined = 1 };//internal::is_same::value }; // typedef AutoDiffScalar ReturnType; // }; #define EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(FUNC,CODE) \ template \ inline const Eigen::AutoDiffScalar< \ EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename Eigen::internal::remove_all::type, typename Eigen::internal::traits::type>::Scalar, product) > \ FUNC(const Eigen::AutoDiffScalar& x) { \ using namespace Eigen; \ EIGEN_UNUSED typedef typename Eigen::internal::traits::type>::Scalar Scalar; \ CODE; \ } template inline const AutoDiffScalar& conj(const AutoDiffScalar& x) { return x; } template inline const AutoDiffScalar& real(const AutoDiffScalar& x) { return x; } template inline typename DerType::Scalar imag(const AutoDiffScalar&) { return 0.; } template inline AutoDiffScalar::type::PlainObject> (min)(const AutoDiffScalar& x, const T& y) { typedef AutoDiffScalar::type::PlainObject> ADS; return (x <= y ? ADS(x) : ADS(y)); } template inline AutoDiffScalar::type::PlainObject> (max)(const AutoDiffScalar& x, const T& y) { typedef AutoDiffScalar::type::PlainObject> ADS; return (x >= y ? ADS(x) : ADS(y)); } template inline AutoDiffScalar::type::PlainObject> (min)(const T& x, const AutoDiffScalar& y) { typedef AutoDiffScalar::type::PlainObject> ADS; return (x < y ? ADS(x) : ADS(y)); } template inline AutoDiffScalar::type::PlainObject> (max)(const T& x, const AutoDiffScalar& y) { typedef AutoDiffScalar::type::PlainObject> ADS; return (x > y ? ADS(x) : ADS(y)); } template inline AutoDiffScalar::type::PlainObject> (min)(const AutoDiffScalar& x, const AutoDiffScalar& y) { return (x.value() < y.value() ? x : y); } template inline AutoDiffScalar::type::PlainObject> (max)(const AutoDiffScalar& x, const AutoDiffScalar& y) { return (x.value() >= y.value() ? x : y); } EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs, using std::abs; return Eigen::MakeAutoDiffScalar(abs(x.value()), x.derivatives() * (x.value()<0 ? 
-1 : 1) );) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs2, using numext::abs2; return Eigen::MakeAutoDiffScalar(abs2(x.value()), x.derivatives() * (Scalar(2)*x.value()));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sqrt, using std::sqrt; Scalar sqrtx = sqrt(x.value()); return Eigen::MakeAutoDiffScalar(sqrtx,x.derivatives() * (Scalar(0.5) / sqrtx));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(cos, using std::cos; using std::sin; return Eigen::MakeAutoDiffScalar(cos(x.value()), x.derivatives() * (-sin(x.value())));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sin, using std::sin; using std::cos; return Eigen::MakeAutoDiffScalar(sin(x.value()),x.derivatives() * cos(x.value()));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(exp, using std::exp; Scalar expx = exp(x.value()); return Eigen::MakeAutoDiffScalar(expx,x.derivatives() * expx);) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(log, using std::log; return Eigen::MakeAutoDiffScalar(log(x.value()),x.derivatives() * (Scalar(1)/x.value()));) template inline const Eigen::AutoDiffScalar< EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename internal::remove_all::type,typename internal::traits::type>::Scalar,product) > pow(const Eigen::AutoDiffScalar &x, const typename internal::traits::type>::Scalar &y) { using namespace Eigen; using std::pow; return Eigen::MakeAutoDiffScalar(pow(x.value(),y), x.derivatives() * (y * pow(x.value(),y-1))); } template inline const AutoDiffScalar::type>::Scalar,Dynamic,1> > atan2(const AutoDiffScalar& a, const AutoDiffScalar& b) { using std::atan2; typedef typename internal::traits::type>::Scalar Scalar; typedef AutoDiffScalar > PlainADS; PlainADS ret; ret.value() = atan2(a.value(), b.value()); Scalar squared_hypot = a.value() * a.value() + b.value() * b.value(); // if (squared_hypot==0) the derivation is undefined and the following results in a NaN: ret.derivatives() = (a.derivatives() * b.value() - a.value() * b.derivatives()) / squared_hypot; return ret; } EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(tan, using std::tan; using std::cos; return Eigen::MakeAutoDiffScalar(tan(x.value()),x.derivatives() * (Scalar(1)/numext::abs2(cos(x.value()))));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(asin, using std::sqrt; using std::asin; return Eigen::MakeAutoDiffScalar(asin(x.value()),x.derivatives() * (Scalar(1)/sqrt(1-numext::abs2(x.value()))));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(acos, using std::sqrt; using std::acos; return Eigen::MakeAutoDiffScalar(acos(x.value()),x.derivatives() * (Scalar(-1)/sqrt(1-numext::abs2(x.value()))));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(tanh, using std::cosh; using std::tanh; return Eigen::MakeAutoDiffScalar(tanh(x.value()),x.derivatives() * (Scalar(1)/numext::abs2(cosh(x.value()))));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sinh, using std::sinh; using std::cosh; return Eigen::MakeAutoDiffScalar(sinh(x.value()),x.derivatives() * cosh(x.value()));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(cosh, using std::sinh; using std::cosh; return Eigen::MakeAutoDiffScalar(cosh(x.value()),x.derivatives() * sinh(x.value()));) #undef EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY template struct NumTraits > : NumTraits< typename NumTraits::type::Scalar>::Real > { typedef typename internal::remove_all::type DerTypeCleaned; typedef AutoDiffScalar::Real,DerTypeCleaned::RowsAtCompileTime,DerTypeCleaned::ColsAtCompileTime, 0, DerTypeCleaned::MaxRowsAtCompileTime, DerTypeCleaned::MaxColsAtCompileTime> > Real; typedef AutoDiffScalar NonInteger; typedef AutoDiffScalar Nested; typedef typename NumTraits::Literal Literal; enum{ RequireInitialization = 1 }; }; } namespace std { template class 
numeric_limits > : public numeric_limits {}; } // namespace std #endif // EIGEN_AUTODIFF_SCALAR_H RcppEigen/inst/include/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h0000644000176200001440000000611613100733213025733 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_AUTODIFF_JACOBIAN_H #define EIGEN_AUTODIFF_JACOBIAN_H namespace Eigen { template class AutoDiffJacobian : public Functor { public: AutoDiffJacobian() : Functor() {} AutoDiffJacobian(const Functor& f) : Functor(f) {} // forward constructors #if EIGEN_HAS_VARIADIC_TEMPLATES template AutoDiffJacobian(const T& ...Values) : Functor(Values...) {} #else template AutoDiffJacobian(const T0& a0) : Functor(a0) {} template AutoDiffJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {} template AutoDiffJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {} #endif typedef typename Functor::InputType InputType; typedef typename Functor::ValueType ValueType; typedef typename ValueType::Scalar Scalar; enum { InputsAtCompileTime = InputType::RowsAtCompileTime, ValuesAtCompileTime = ValueType::RowsAtCompileTime }; typedef Matrix JacobianType; typedef typename JacobianType::Index Index; typedef Matrix DerivativeType; typedef AutoDiffScalar ActiveScalar; typedef Matrix ActiveInput; typedef Matrix ActiveValue; #if EIGEN_HAS_VARIADIC_TEMPLATES // Some compilers don't accept variadic parameters after a default parameter, // i.e., we can't just write _jac=0 but we need to overload operator(): EIGEN_STRONG_INLINE void operator() (const InputType& x, ValueType* v) const { this->operator()(x, v, 0); } template void operator() (const InputType& x, ValueType* v, JacobianType* _jac, const ParamsType&... Params) const #else void operator() (const InputType& x, ValueType* v, JacobianType* _jac=0) const #endif { eigen_assert(v!=0); if (!_jac) { #if EIGEN_HAS_VARIADIC_TEMPLATES Functor::operator()(x, v, Params...); #else Functor::operator()(x, v); #endif return; } JacobianType& jac = *_jac; ActiveInput ax = x.template cast(); ActiveValue av(jac.rows()); if(InputsAtCompileTime==Dynamic) for (Index j=0; j // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_AUTODIFF_VECTOR_H #define EIGEN_AUTODIFF_VECTOR_H namespace Eigen { /* \class AutoDiffScalar * \brief A scalar type replacement with automatic differentation capability * * \param DerType the vector type used to store/represent the derivatives (e.g. Vector3f) * * This class represents a scalar value while tracking its respective derivatives. * * It supports the following list of global math function: * - std::abs, std::sqrt, std::pow, std::exp, std::log, std::sin, std::cos, * - internal::abs, internal::sqrt, numext::pow, internal::exp, internal::log, internal::sin, internal::cos, * - internal::conj, internal::real, internal::imag, numext::abs2. * * AutoDiffScalar can be used as the scalar type of an Eigen::Matrix object. However, * in that case, the expression template mechanism only occurs at the top Matrix level, * while derivatives are computed right away. 
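 *
 * A minimal usage sketch (illustrative only; the typedef and the variables
 * below are hypothetical and not part of this file):
 * \code
 * typedef Eigen::AutoDiffScalar<Eigen::Vector2d> AD;
 * AD x(1.0, Eigen::Vector2d::Unit(0));   // value 1, seeded with d/dx
 * AD y(2.0, Eigen::Vector2d::Unit(1));   // value 2, seeded with d/dy
 * AD f = x * y + sin(x);                 // f.value() == 2 + sin(1)
 * // f.derivatives() == (y + cos(x), x) at (1,2), i.e. (2 + cos(1), 1)
 * \endcode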
* */ template class AutoDiffVector { public: //typedef typename internal::traits::Scalar Scalar; typedef typename internal::traits::Scalar BaseScalar; typedef AutoDiffScalar > ActiveScalar; typedef ActiveScalar Scalar; typedef AutoDiffScalar CoeffType; typedef typename JacobianType::Index Index; inline AutoDiffVector() {} inline AutoDiffVector(const ValueType& values) : m_values(values) { m_jacobian.setZero(); } CoeffType operator[] (Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); } const CoeffType operator[] (Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); } CoeffType operator() (Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); } const CoeffType operator() (Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); } CoeffType coeffRef(Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); } const CoeffType coeffRef(Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); } Index size() const { return m_values.size(); } // FIXME here we could return an expression of the sum Scalar sum() const { /*std::cerr << "sum \n\n";*/ /*std::cerr << m_jacobian.rowwise().sum() << "\n\n";*/ return Scalar(m_values.sum(), m_jacobian.rowwise().sum()); } inline AutoDiffVector(const ValueType& values, const JacobianType& jac) : m_values(values), m_jacobian(jac) {} template inline AutoDiffVector(const AutoDiffVector& other) : m_values(other.values()), m_jacobian(other.jacobian()) {} inline AutoDiffVector(const AutoDiffVector& other) : m_values(other.values()), m_jacobian(other.jacobian()) {} template inline AutoDiffVector& operator=(const AutoDiffVector& other) { m_values = other.values(); m_jacobian = other.jacobian(); return *this; } inline AutoDiffVector& operator=(const AutoDiffVector& other) { m_values = other.values(); m_jacobian = other.jacobian(); return *this; } inline const ValueType& values() const { return m_values; } inline ValueType& values() { return m_values; } inline const JacobianType& jacobian() const { return m_jacobian; } inline JacobianType& jacobian() { return m_jacobian; } template inline const AutoDiffVector< typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type > operator+(const AutoDiffVector& other) const { return AutoDiffVector< typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type >( m_values + other.values(), m_jacobian + other.jacobian()); } template inline AutoDiffVector& operator+=(const AutoDiffVector& other) { m_values += other.values(); m_jacobian += other.jacobian(); return *this; } template inline const AutoDiffVector< typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type > operator-(const AutoDiffVector& other) const { return AutoDiffVector< typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type >( m_values - other.values(), m_jacobian - other.jacobian()); } template inline AutoDiffVector& operator-=(const AutoDiffVector& other) { m_values -= other.values(); m_jacobian -= other.jacobian(); return *this; } inline const AutoDiffVector< typename MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type > operator-() const { return AutoDiffVector< typename MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type >( -m_values, -m_jacobian); } inline const AutoDiffVector< typename 
MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type> operator*(const BaseScalar& other) const { return AutoDiffVector< typename MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type >( m_values * other, m_jacobian * other); } friend inline const AutoDiffVector< typename MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type > operator*(const Scalar& other, const AutoDiffVector& v) { return AutoDiffVector< typename MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type >( v.values() * other, v.jacobian() * other); } // template // inline const AutoDiffVector< // CwiseBinaryOp, ValueType, OtherValueType> // CwiseBinaryOp, // CwiseUnaryOp, JacobianType>, // CwiseUnaryOp, OtherJacobianType> > > // operator*(const AutoDiffVector& other) const // { // return AutoDiffVector< // CwiseBinaryOp, ValueType, OtherValueType> // CwiseBinaryOp, // CwiseUnaryOp, JacobianType>, // CwiseUnaryOp, OtherJacobianType> > >( // m_values.cwise() * other.values(), // (m_jacobian * other.values()) + (m_values * other.jacobian())); // } inline AutoDiffVector& operator*=(const Scalar& other) { m_values *= other; m_jacobian *= other; return *this; } template inline AutoDiffVector& operator*=(const AutoDiffVector& other) { *this = *this * other; return *this; } protected: ValueType m_values; JacobianType m_jacobian; }; } #endif // EIGEN_AUTODIFF_VECTOR_H RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/0000755000176200001440000000000013100733213025221 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h0000644000176200001440000000252212253717461026737 0ustar liggesusersnamespace Eigen { namespace internal { template void rwupdt( Matrix< Scalar, Dynamic, Dynamic > &r, const Matrix< Scalar, Dynamic, 1> &w, Matrix< Scalar, Dynamic, 1> &b, Scalar alpha) { typedef DenseIndex Index; const Index n = r.cols(); eigen_assert(r.rows()>=n); std::vector > givens(n); /* Local variables */ Scalar temp, rowj; /* Function Body */ for (Index j = 0; j < n; ++j) { rowj = w[j]; /* apply the previous transformations to */ /* r(i,j), i=0,1,...,j-1, and to w(j). */ for (Index i = 0; i < j; ++i) { temp = givens[i].c() * r(i,j) + givens[i].s() * rowj; rowj = -givens[i].s() * r(i,j) + givens[i].c() * rowj; r(i,j) = temp; } /* determine a givens rotation which eliminates w(j). */ givens[j].makeGivens(-r(j,j), rowj); if (rowj == 0.) continue; // givens[j] is identity /* apply the current transformation to r(j,j), b(j), and alpha. */ r(j,j) = givens[j].c() * r(j,j) + givens[j].s() * rowj; temp = givens[j].c() * b[j] + givens[j].s() * alpha; alpha = -givens[j].s() * b[j] + givens[j].c() * alpha; b[j] = temp; } } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h0000644000176200001440000005316713100733213031200 0ustar liggesusers// -*- coding: utf-8 // vim: set fileencoding=utf-8 // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
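// A minimal usage sketch of the solver defined below (illustrative only;
// 'CurveFittingFunctor' and its data points are hypothetical and not part of
// this file).  The functor supplies the residuals via operator(), their
// Jacobian via df(), and the problem sizes via values()/inputs():
#if 0
#include <unsupported/Eigen/NonLinearOptimization>

struct CurveFittingFunctor
{
  // residuals r_i = a*t_i + b - y_i for the straight line y = a*t + b
  int operator()(const Eigen::VectorXd &x, Eigen::VectorXd &fvec) const
  {
    const double t[3] = {0.0, 1.0, 2.0}, y[3] = {1.0, 3.0, 5.0};
    for (int i = 0; i < 3; ++i)
      fvec[i] = x[0] * t[i] + x[1] - y[i];
    return 0;                                  // a negative value aborts the solver
  }
  int df(const Eigen::VectorXd &, Eigen::MatrixXd &fjac) const
  {
    const double t[3] = {0.0, 1.0, 2.0};
    for (int i = 0; i < 3; ++i) { fjac(i, 0) = t[i]; fjac(i, 1) = 1.0; }
    return 0;                                  // 0 means an analytic Jacobian was supplied
  }
  int values() const { return 3; }             // number of residuals (m)
  int inputs() const { return 2; }             // number of unknowns (n)
};

void lm_example()
{
  Eigen::VectorXd x(2);
  x << 0.0, 0.0;                               // starting guess
  CurveFittingFunctor functor;
  Eigen::LevenbergMarquardt<CurveFittingFunctor> lm(functor);
  lm.minimize(x);                              // x converges to (2, 1)
}
#endif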
#ifndef EIGEN_LEVENBERGMARQUARDT__H #define EIGEN_LEVENBERGMARQUARDT__H namespace Eigen { namespace LevenbergMarquardtSpace { enum Status { NotStarted = -2, Running = -1, ImproperInputParameters = 0, RelativeReductionTooSmall = 1, RelativeErrorTooSmall = 2, RelativeErrorAndReductionTooSmall = 3, CosinusTooSmall = 4, TooManyFunctionEvaluation = 5, FtolTooSmall = 6, XtolTooSmall = 7, GtolTooSmall = 8, UserAsked = 9 }; } /** * \ingroup NonLinearOptimization_Module * \brief Performs non linear optimization over a non-linear function, * using a variant of the Levenberg Marquardt algorithm. * * Check wikipedia for more information. * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm */ template class LevenbergMarquardt { static Scalar sqrt_epsilon() { using std::sqrt; return sqrt(NumTraits::epsilon()); } public: LevenbergMarquardt(FunctorType &_functor) : functor(_functor) { nfev = njev = iter = 0; fnorm = gnorm = 0.; useExternalScaling=false; } typedef DenseIndex Index; struct Parameters { Parameters() : factor(Scalar(100.)) , maxfev(400) , ftol(sqrt_epsilon()) , xtol(sqrt_epsilon()) , gtol(Scalar(0.)) , epsfcn(Scalar(0.)) {} Scalar factor; Index maxfev; // maximum number of function evaluation Scalar ftol; Scalar xtol; Scalar gtol; Scalar epsfcn; }; typedef Matrix< Scalar, Dynamic, 1 > FVectorType; typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType; LevenbergMarquardtSpace::Status lmder1( FVectorType &x, const Scalar tol = sqrt_epsilon() ); LevenbergMarquardtSpace::Status minimize(FVectorType &x); LevenbergMarquardtSpace::Status minimizeInit(FVectorType &x); LevenbergMarquardtSpace::Status minimizeOneStep(FVectorType &x); static LevenbergMarquardtSpace::Status lmdif1( FunctorType &functor, FVectorType &x, Index *nfev, const Scalar tol = sqrt_epsilon() ); LevenbergMarquardtSpace::Status lmstr1( FVectorType &x, const Scalar tol = sqrt_epsilon() ); LevenbergMarquardtSpace::Status minimizeOptimumStorage(FVectorType &x); LevenbergMarquardtSpace::Status minimizeOptimumStorageInit(FVectorType &x); LevenbergMarquardtSpace::Status minimizeOptimumStorageOneStep(FVectorType &x); void resetParameters(void) { parameters = Parameters(); } Parameters parameters; FVectorType fvec, qtf, diag; JacobianType fjac; PermutationMatrix permutation; Index nfev; Index njev; Index iter; Scalar fnorm, gnorm; bool useExternalScaling; Scalar lm_param(void) { return par; } private: FunctorType &functor; Index n; Index m; FVectorType wa1, wa2, wa3, wa4; Scalar par, sum; Scalar temp, temp1, temp2; Scalar delta; Scalar ratio; Scalar pnorm, xnorm, fnorm1, actred, dirder, prered; LevenbergMarquardt& operator=(const LevenbergMarquardt&); }; template LevenbergMarquardtSpace::Status LevenbergMarquardt::lmder1( FVectorType &x, const Scalar tol ) { n = x.size(); m = functor.values(); /* check the input parameters for errors. */ if (n <= 0 || m < n || tol < 0.) 
return LevenbergMarquardtSpace::ImproperInputParameters; resetParameters(); parameters.ftol = tol; parameters.xtol = tol; parameters.maxfev = 100*(n+1); return minimize(x); } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimize(FVectorType &x) { LevenbergMarquardtSpace::Status status = minimizeInit(x); if (status==LevenbergMarquardtSpace::ImproperInputParameters) return status; do { status = minimizeOneStep(x); } while (status==LevenbergMarquardtSpace::Running); return status; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeInit(FVectorType &x) { n = x.size(); m = functor.values(); wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(m); fvec.resize(m); fjac.resize(m, n); if (!useExternalScaling) diag.resize(n); eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'"); qtf.resize(n); /* Function Body */ nfev = 0; njev = 0; /* check the input parameters for errors. */ if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.) return LevenbergMarquardtSpace::ImproperInputParameters; if (useExternalScaling) for (Index j = 0; j < n; ++j) if (diag[j] <= 0.) return LevenbergMarquardtSpace::ImproperInputParameters; /* evaluate the function at the starting point */ /* and calculate its norm. */ nfev = 1; if ( functor(x, fvec) < 0) return LevenbergMarquardtSpace::UserAsked; fnorm = fvec.stableNorm(); /* initialize levenberg-marquardt parameter and iteration counter. */ par = 0.; iter = 1; return LevenbergMarquardtSpace::NotStarted; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeOneStep(FVectorType &x) { using std::abs; using std::sqrt; eigen_assert(x.size()==n); // check the caller is not cheating us /* calculate the jacobian matrix. */ Index df_ret = functor.df(x, fjac); if (df_ret<0) return LevenbergMarquardtSpace::UserAsked; if (df_ret>0) // numerical diff, we evaluated the function df_ret times nfev += df_ret; else njev++; /* compute the qr factorization of the jacobian. */ wa2 = fjac.colwise().blueNorm(); ColPivHouseholderQR qrfac(fjac); fjac = qrfac.matrixQR(); permutation = qrfac.colsPermutation(); /* on the first iteration and if external scaling is not used, scale according */ /* to the norms of the columns of the initial jacobian. */ if (iter == 1) { if (!useExternalScaling) for (Index j = 0; j < n; ++j) diag[j] = (wa2[j]==0.)? 1. : wa2[j]; /* on the first iteration, calculate the norm of the scaled x */ /* and initialize the step bound delta. */ xnorm = diag.cwiseProduct(x).stableNorm(); delta = parameters.factor * xnorm; if (delta == 0.) delta = parameters.factor; } /* form (q transpose)*fvec and store the first n components in */ /* qtf. */ wa4 = fvec; wa4.applyOnTheLeft(qrfac.householderQ().adjoint()); qtf = wa4.head(n); /* compute the norm of the scaled gradient. */ gnorm = 0.; if (fnorm != 0.) for (Index j = 0; j < n; ++j) if (wa2[permutation.indices()[j]] != 0.) gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); /* test for convergence of the gradient norm. */ if (gnorm <= parameters.gtol) return LevenbergMarquardtSpace::CosinusTooSmall; /* rescale if necessary. */ if (!useExternalScaling) diag = diag.cwiseMax(wa2); do { /* determine the levenberg-marquardt parameter. */ internal::lmpar2(qrfac, diag, qtf, delta, par, wa1); /* store the direction p and x + p. calculate the norm of p. 
*/ wa1 = -wa1; wa2 = x + wa1; pnorm = diag.cwiseProduct(wa1).stableNorm(); /* on the first iteration, adjust the initial step bound. */ if (iter == 1) delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( functor(wa2, wa4) < 0) return LevenbergMarquardtSpace::UserAsked; ++nfev; fnorm1 = wa4.stableNorm(); /* compute the scaled actual reduction. */ actred = -1.; if (Scalar(.1) * fnorm1 < fnorm) actred = 1. - numext::abs2(fnorm1 / fnorm); /* compute the scaled predicted reduction and */ /* the scaled directional derivative. */ wa3 = fjac.template triangularView() * (qrfac.colsPermutation().inverse() *wa1); temp1 = numext::abs2(wa3.stableNorm() / fnorm); temp2 = numext::abs2(sqrt(par) * pnorm / fnorm); prered = temp1 + temp2 / Scalar(.5); dirder = -(temp1 + temp2); /* compute the ratio of the actual to the predicted */ /* reduction. */ ratio = 0.; if (prered != 0.) ratio = actred / prered; /* update the step bound. */ if (ratio <= Scalar(.25)) { if (actred >= 0.) temp = Scalar(.5); if (actred < 0.) temp = Scalar(.5) * dirder / (dirder + Scalar(.5) * actred); if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1)) temp = Scalar(.1); /* Computing MIN */ delta = temp * (std::min)(delta, pnorm / Scalar(.1)); par /= temp; } else if (!(par != 0. && ratio < Scalar(.75))) { delta = pnorm / Scalar(.5); par = Scalar(.5) * par; } /* test for successful iteration. */ if (ratio >= Scalar(1e-4)) { /* successful iteration. update x, fvec, and their norms. */ x = wa2; wa2 = diag.cwiseProduct(x); fvec = wa4; xnorm = wa2.stableNorm(); fnorm = fnorm1; ++iter; } /* tests for convergence. */ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm) return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall; if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.) return LevenbergMarquardtSpace::RelativeReductionTooSmall; if (delta <= parameters.xtol * xnorm) return LevenbergMarquardtSpace::RelativeErrorTooSmall; /* tests for termination and stringent tolerances. */ if (nfev >= parameters.maxfev) return LevenbergMarquardtSpace::TooManyFunctionEvaluation; if (abs(actred) <= NumTraits::epsilon() && prered <= NumTraits::epsilon() && Scalar(.5) * ratio <= 1.) return LevenbergMarquardtSpace::FtolTooSmall; if (delta <= NumTraits::epsilon() * xnorm) return LevenbergMarquardtSpace::XtolTooSmall; if (gnorm <= NumTraits::epsilon()) return LevenbergMarquardtSpace::GtolTooSmall; } while (ratio < Scalar(1e-4)); return LevenbergMarquardtSpace::Running; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::lmstr1( FVectorType &x, const Scalar tol ) { n = x.size(); m = functor.values(); /* check the input parameters for errors. */ if (n <= 0 || m < n || tol < 0.) return LevenbergMarquardtSpace::ImproperInputParameters; resetParameters(); parameters.ftol = tol; parameters.xtol = tol; parameters.maxfev = 100*(n+1); return minimizeOptimumStorage(x); } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeOptimumStorageInit(FVectorType &x) { n = x.size(); m = functor.values(); wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(m); fvec.resize(m); // Only R is stored in fjac. Q is only used to compute 'qtf', which is // Q.transpose()*rhs. qtf will be updated using givens rotation, // instead of storing them in Q. 
// The purpose it to only use a nxn matrix, instead of mxn here, so // that we can handle cases where m>>n : fjac.resize(n, n); if (!useExternalScaling) diag.resize(n); eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'"); qtf.resize(n); /* Function Body */ nfev = 0; njev = 0; /* check the input parameters for errors. */ if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.) return LevenbergMarquardtSpace::ImproperInputParameters; if (useExternalScaling) for (Index j = 0; j < n; ++j) if (diag[j] <= 0.) return LevenbergMarquardtSpace::ImproperInputParameters; /* evaluate the function at the starting point */ /* and calculate its norm. */ nfev = 1; if ( functor(x, fvec) < 0) return LevenbergMarquardtSpace::UserAsked; fnorm = fvec.stableNorm(); /* initialize levenberg-marquardt parameter and iteration counter. */ par = 0.; iter = 1; return LevenbergMarquardtSpace::NotStarted; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeOptimumStorageOneStep(FVectorType &x) { using std::abs; using std::sqrt; eigen_assert(x.size()==n); // check the caller is not cheating us Index i, j; bool sing; /* compute the qr factorization of the jacobian matrix */ /* calculated one row at a time, while simultaneously */ /* forming (q transpose)*fvec and storing the first */ /* n components in qtf. */ qtf.fill(0.); fjac.fill(0.); Index rownb = 2; for (i = 0; i < m; ++i) { if (functor.df(x, wa3, rownb) < 0) return LevenbergMarquardtSpace::UserAsked; internal::rwupdt(fjac, wa3, qtf, fvec[i]); ++rownb; } ++njev; /* if the jacobian is rank deficient, call qrfac to */ /* reorder its columns and update the components of qtf. */ sing = false; for (j = 0; j < n; ++j) { if (fjac(j,j) == 0.) sing = true; wa2[j] = fjac.col(j).head(j).stableNorm(); } permutation.setIdentity(n); if (sing) { wa2 = fjac.colwise().blueNorm(); // TODO We have no unit test covering this code path, do not modify // until it is carefully tested ColPivHouseholderQR qrfac(fjac); fjac = qrfac.matrixQR(); wa1 = fjac.diagonal(); fjac.diagonal() = qrfac.hCoeffs(); permutation = qrfac.colsPermutation(); // TODO : avoid this: for(Index ii=0; ii< fjac.cols(); ii++) fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors for (j = 0; j < n; ++j) { if (fjac(j,j) != 0.) { sum = 0.; for (i = j; i < n; ++i) sum += fjac(i,j) * qtf[i]; temp = -sum / fjac(j,j); for (i = j; i < n; ++i) qtf[i] += fjac(i,j) * temp; } fjac(j,j) = wa1[j]; } } /* on the first iteration and if external scaling is not used, scale according */ /* to the norms of the columns of the initial jacobian. */ if (iter == 1) { if (!useExternalScaling) for (j = 0; j < n; ++j) diag[j] = (wa2[j]==0.)? 1. : wa2[j]; /* on the first iteration, calculate the norm of the scaled x */ /* and initialize the step bound delta. */ xnorm = diag.cwiseProduct(x).stableNorm(); delta = parameters.factor * xnorm; if (delta == 0.) delta = parameters.factor; } /* compute the norm of the scaled gradient. */ gnorm = 0.; if (fnorm != 0.) for (j = 0; j < n; ++j) if (wa2[permutation.indices()[j]] != 0.) gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); /* test for convergence of the gradient norm. */ if (gnorm <= parameters.gtol) return LevenbergMarquardtSpace::CosinusTooSmall; /* rescale if necessary. 
*/ if (!useExternalScaling) diag = diag.cwiseMax(wa2); do { /* determine the levenberg-marquardt parameter. */ internal::lmpar(fjac, permutation.indices(), diag, qtf, delta, par, wa1); /* store the direction p and x + p. calculate the norm of p. */ wa1 = -wa1; wa2 = x + wa1; pnorm = diag.cwiseProduct(wa1).stableNorm(); /* on the first iteration, adjust the initial step bound. */ if (iter == 1) delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( functor(wa2, wa4) < 0) return LevenbergMarquardtSpace::UserAsked; ++nfev; fnorm1 = wa4.stableNorm(); /* compute the scaled actual reduction. */ actred = -1.; if (Scalar(.1) * fnorm1 < fnorm) actred = 1. - numext::abs2(fnorm1 / fnorm); /* compute the scaled predicted reduction and */ /* the scaled directional derivative. */ wa3 = fjac.topLeftCorner(n,n).template triangularView() * (permutation.inverse() * wa1); temp1 = numext::abs2(wa3.stableNorm() / fnorm); temp2 = numext::abs2(sqrt(par) * pnorm / fnorm); prered = temp1 + temp2 / Scalar(.5); dirder = -(temp1 + temp2); /* compute the ratio of the actual to the predicted */ /* reduction. */ ratio = 0.; if (prered != 0.) ratio = actred / prered; /* update the step bound. */ if (ratio <= Scalar(.25)) { if (actred >= 0.) temp = Scalar(.5); if (actred < 0.) temp = Scalar(.5) * dirder / (dirder + Scalar(.5) * actred); if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1)) temp = Scalar(.1); /* Computing MIN */ delta = temp * (std::min)(delta, pnorm / Scalar(.1)); par /= temp; } else if (!(par != 0. && ratio < Scalar(.75))) { delta = pnorm / Scalar(.5); par = Scalar(.5) * par; } /* test for successful iteration. */ if (ratio >= Scalar(1e-4)) { /* successful iteration. update x, fvec, and their norms. */ x = wa2; wa2 = diag.cwiseProduct(x); fvec = wa4; xnorm = wa2.stableNorm(); fnorm = fnorm1; ++iter; } /* tests for convergence. */ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm) return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall; if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.) return LevenbergMarquardtSpace::RelativeReductionTooSmall; if (delta <= parameters.xtol * xnorm) return LevenbergMarquardtSpace::RelativeErrorTooSmall; /* tests for termination and stringent tolerances. */ if (nfev >= parameters.maxfev) return LevenbergMarquardtSpace::TooManyFunctionEvaluation; if (abs(actred) <= NumTraits::epsilon() && prered <= NumTraits::epsilon() && Scalar(.5) * ratio <= 1.) return LevenbergMarquardtSpace::FtolTooSmall; if (delta <= NumTraits::epsilon() * xnorm) return LevenbergMarquardtSpace::XtolTooSmall; if (gnorm <= NumTraits::epsilon()) return LevenbergMarquardtSpace::GtolTooSmall; } while (ratio < Scalar(1e-4)); return LevenbergMarquardtSpace::Running; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeOptimumStorage(FVectorType &x) { LevenbergMarquardtSpace::Status status = minimizeOptimumStorageInit(x); if (status==LevenbergMarquardtSpace::ImproperInputParameters) return status; do { status = minimizeOptimumStorageOneStep(x); } while (status==LevenbergMarquardtSpace::Running); return status; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::lmdif1( FunctorType &functor, FVectorType &x, Index *nfev, const Scalar tol ) { Index n = x.size(); Index m = functor.values(); /* check the input parameters for errors. */ if (n <= 0 || m < n || tol < 0.) 
return LevenbergMarquardtSpace::ImproperInputParameters; NumericalDiff numDiff(functor); // embedded LevenbergMarquardt LevenbergMarquardt, Scalar > lm(numDiff); lm.parameters.ftol = tol; lm.parameters.xtol = tol; lm.parameters.maxfev = 200*(n+1); LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x)); if (nfev) * nfev = lm.nfev; return info; } } // end namespace Eigen #endif // EIGEN_LEVENBERGMARQUARDT__H //vim: ai ts=4 sts=4 et sw=4 RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/chkder.h0000644000176200001440000000351012253717461026650 0ustar liggesusers#define chkder_log10e 0.43429448190325182765 #define chkder_factor 100. namespace Eigen { namespace internal { template void chkder( const Matrix< Scalar, Dynamic, 1 > &x, const Matrix< Scalar, Dynamic, 1 > &fvec, const Matrix< Scalar, Dynamic, Dynamic > &fjac, Matrix< Scalar, Dynamic, 1 > &xp, const Matrix< Scalar, Dynamic, 1 > &fvecp, int mode, Matrix< Scalar, Dynamic, 1 > &err ) { using std::sqrt; using std::abs; using std::log; typedef DenseIndex Index; const Scalar eps = sqrt(NumTraits::epsilon()); const Scalar epsf = chkder_factor * NumTraits::epsilon(); const Scalar epslog = chkder_log10e * log(eps); Scalar temp; const Index m = fvec.size(), n = x.size(); if (mode != 2) { /* mode = 1. */ xp.resize(n); for (Index j = 0; j < n; ++j) { temp = eps * abs(x[j]); if (temp == 0.) temp = eps; xp[j] = x[j] + temp; } } else { /* mode = 2. */ err.setZero(m); for (Index j = 0; j < n; ++j) { temp = abs(x[j]); if (temp == 0.) temp = 1.; err += temp * fjac.col(j); } for (Index i = 0; i < m; ++i) { temp = 1.; if (fvec[i] != 0. && fvecp[i] != 0. && abs(fvecp[i] - fvec[i]) >= epsf * abs(fvec[i])) temp = eps * abs((fvecp[i] - fvec[i]) / eps - err[i]) / (abs(fvec[i]) + abs(fvecp[i])); err[i] = 1.; if (temp > NumTraits::epsilon() && temp < eps) err[i] = (chkder_log10e * log(temp) - epslog) / epslog; if (temp >= eps) err[i] = 0.; } } } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h0000644000176200001440000000627712253717461026753 0ustar liggesusersnamespace Eigen { namespace internal { // TODO : once qrsolv2 is removed, use ColPivHouseholderQR or PermutationMatrix instead of ipvt template void qrsolv( Matrix< Scalar, Dynamic, Dynamic > &s, // TODO : use a PermutationMatrix once lmpar is no more: const VectorXi &ipvt, const Matrix< Scalar, Dynamic, 1 > &diag, const Matrix< Scalar, Dynamic, 1 > &qtb, Matrix< Scalar, Dynamic, 1 > &x, Matrix< Scalar, Dynamic, 1 > &sdiag) { typedef DenseIndex Index; /* Local variables */ Index i, j, k, l; Scalar temp; Index n = s.cols(); Matrix< Scalar, Dynamic, 1 > wa(n); JacobiRotation givens; /* Function Body */ // the following will only change the lower triangular part of s, including // the diagonal, though the diagonal is restored afterward /* copy r and (q transpose)*b to preserve input and initialize s. */ /* in particular, save the diagonal elements of r in x. */ x = s.diagonal(); wa = qtb; s.topLeftCorner(n,n).template triangularView() = s.topLeftCorner(n,n).transpose(); /* eliminate the diagonal matrix d using a givens rotation. */ for (j = 0; j < n; ++j) { /* prepare the row of d to be eliminated, locating the */ /* diagonal element using p from the qr factorization. */ l = ipvt[j]; if (diag[l] == 0.) 
break; sdiag.tail(n-j).setZero(); sdiag[j] = diag[l]; /* the transformations to eliminate the row of d */ /* modify only a single element of (q transpose)*b */ /* beyond the first n, which is initially zero. */ Scalar qtbpj = 0.; for (k = j; k < n; ++k) { /* determine a givens rotation which eliminates the */ /* appropriate element in the current row of d. */ givens.makeGivens(-s(k,k), sdiag[k]); /* compute the modified diagonal element of r and */ /* the modified element of ((q transpose)*b,0). */ s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k]; temp = givens.c() * wa[k] + givens.s() * qtbpj; qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj; wa[k] = temp; /* accumulate the tranformation in the row of s. */ for (i = k+1; i().solveInPlace(wa.head(nsing)); // restore sdiag = s.diagonal(); s.diagonal() = x; /* permute the components of z back to components of x. */ for (j = 0; j < n; ++j) x[ipvt[j]] = wa[j]; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h0000644000176200001440000000207112253717461026642 0ustar liggesusersnamespace Eigen { namespace internal { // TODO : move this to GivensQR once there's such a thing in Eigen template void r1mpyq(DenseIndex m, DenseIndex n, Scalar *a, const std::vector > &v_givens, const std::vector > &w_givens) { typedef DenseIndex Index; /* apply the first set of givens rotations to a. */ for (Index j = n-2; j>=0; --j) for (Index i = 0; i void dogleg( const Matrix< Scalar, Dynamic, Dynamic > &qrfac, const Matrix< Scalar, Dynamic, 1 > &diag, const Matrix< Scalar, Dynamic, 1 > &qtb, Scalar delta, Matrix< Scalar, Dynamic, 1 > &x) { using std::abs; using std::sqrt; typedef DenseIndex Index; /* Local variables */ Index i, j; Scalar sum, temp, alpha, bnorm; Scalar gnorm, qnorm; Scalar sgnorm; /* Function Body */ const Scalar epsmch = NumTraits::epsilon(); const Index n = qrfac.cols(); eigen_assert(n==qtb.size()); eigen_assert(n==x.size()); eigen_assert(n==diag.size()); Matrix< Scalar, Dynamic, 1 > wa1(n), wa2(n); /* first, calculate the gauss-newton direction. */ for (j = n-1; j >=0; --j) { temp = qrfac(j,j); if (temp == 0.) { temp = epsmch * qrfac.col(j).head(j+1).maxCoeff(); if (temp == 0.) temp = epsmch; } if (j==n-1) x[j] = qtb[j] / temp; else x[j] = (qtb[j] - qrfac.row(j).tail(n-j-1).dot(x.tail(n-j-1))) / temp; } /* test whether the gauss-newton direction is acceptable. */ qnorm = diag.cwiseProduct(x).stableNorm(); if (qnorm <= delta) return; // TODO : this path is not tested by Eigen unit tests /* the gauss-newton direction is not acceptable. */ /* next, calculate the scaled gradient direction. */ wa1.fill(0.); for (j = 0; j < n; ++j) { wa1.tail(n-j) += qrfac.row(j).tail(n-j) * qtb[j]; wa1[j] /= diag[j]; } /* calculate the norm of the scaled gradient and test for */ /* the special case in which the scaled gradient is zero. */ gnorm = wa1.stableNorm(); sgnorm = 0.; alpha = delta / qnorm; if (gnorm == 0.) goto algo_end; /* calculate the point along the scaled gradient */ /* at which the quadratic is minimized. */ wa1.array() /= (diag*gnorm).array(); // TODO : once unit tests cover this part,: // wa2 = qrfac.template triangularView() * wa1; for (j = 0; j < n; ++j) { sum = 0.; for (i = j; i < n; ++i) { sum += qrfac(j,i) * wa1[i]; } wa2[j] = sum; } temp = wa2.stableNorm(); sgnorm = gnorm / temp / temp; /* test whether the scaled gradient direction is acceptable. */ alpha = 0.; if (sgnorm >= delta) goto algo_end; /* the scaled gradient direction is not acceptable. 
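   (its minimizer along that direction, the Cauchy point of scaled length
   sgnorm, lies inside the trust region while the Gauss-Newton step lies
   outside).  The code below therefore forms the classical dogleg step
       x <- (1 - alpha) * min(sgnorm, delta) * wa1 + alpha * x,
   a convex combination of the scaled-gradient direction (wa1) and the
   Gauss-Newton direction (x), with alpha chosen so that the combined step
   just reaches the trust-region boundary.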
*/ /* finally, calculate the point along the dogleg */ /* at which the quadratic is minimized. */ bnorm = qtb.stableNorm(); temp = bnorm / gnorm * (bnorm / qnorm) * (sgnorm / delta); temp = temp - delta / qnorm * numext::abs2(sgnorm / delta) + sqrt(numext::abs2(temp - delta / qnorm) + (1.-numext::abs2(delta / qnorm)) * (1.-numext::abs2(sgnorm / delta))); alpha = delta / qnorm * (1. - numext::abs2(sgnorm / delta)) / temp; algo_end: /* form appropriate convex combination of the gauss-newton */ /* direction and the scaled gradient direction. */ temp = (1.-alpha) * (std::min)(sgnorm,delta); x = temp * wa1 + alpha * x; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h0000644000176200001440000004656413100733213031633 0ustar liggesusers// -*- coding: utf-8 // vim: set fileencoding=utf-8 // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HYBRIDNONLINEARSOLVER_H #define EIGEN_HYBRIDNONLINEARSOLVER_H namespace Eigen { namespace HybridNonLinearSolverSpace { enum Status { Running = -1, ImproperInputParameters = 0, RelativeErrorTooSmall = 1, TooManyFunctionEvaluation = 2, TolTooSmall = 3, NotMakingProgressJacobian = 4, NotMakingProgressIterations = 5, UserAsked = 6 }; } /** * \ingroup NonLinearOptimization_Module * \brief Finds a zero of a system of n * nonlinear functions in n variables by a modification of the Powell * hybrid method ("dogleg"). * * The user must provide a subroutine which calculates the * functions. The Jacobian is either provided by the user, or approximated * using a forward-difference method. 
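 *
 * A minimal usage sketch (illustrative only; 'MyFunctor' and the system size
 * are hypothetical and not part of this file):
 * \code
 * // MyFunctor provides the n equations via
 * //   int operator()(const Eigen::VectorXd &x, Eigen::VectorXd &fvec) const;
 * // and, for solve(), their Jacobian via
 * //   int df(const Eigen::VectorXd &x, Eigen::MatrixXd &fjac) const;
 * MyFunctor functor;
 * Eigen::VectorXd x = Eigen::VectorXd::Zero(3);        // starting guess (n == 3 assumed)
 * Eigen::HybridNonLinearSolver<MyFunctor> solver(functor);
 * Eigen::HybridNonLinearSolverSpace::Status status = solver.solve(x);
 * // solver.solveNumericalDiff(x) uses forward differences when no df() is available
 * \endcode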
* */ template class HybridNonLinearSolver { public: typedef DenseIndex Index; HybridNonLinearSolver(FunctorType &_functor) : functor(_functor) { nfev=njev=iter = 0; fnorm= 0.; useExternalScaling=false;} struct Parameters { Parameters() : factor(Scalar(100.)) , maxfev(1000) , xtol(std::sqrt(NumTraits::epsilon())) , nb_of_subdiagonals(-1) , nb_of_superdiagonals(-1) , epsfcn(Scalar(0.)) {} Scalar factor; Index maxfev; // maximum number of function evaluation Scalar xtol; Index nb_of_subdiagonals; Index nb_of_superdiagonals; Scalar epsfcn; }; typedef Matrix< Scalar, Dynamic, 1 > FVectorType; typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType; /* TODO: if eigen provides a triangular storage, use it here */ typedef Matrix< Scalar, Dynamic, Dynamic > UpperTriangularType; HybridNonLinearSolverSpace::Status hybrj1( FVectorType &x, const Scalar tol = std::sqrt(NumTraits::epsilon()) ); HybridNonLinearSolverSpace::Status solveInit(FVectorType &x); HybridNonLinearSolverSpace::Status solveOneStep(FVectorType &x); HybridNonLinearSolverSpace::Status solve(FVectorType &x); HybridNonLinearSolverSpace::Status hybrd1( FVectorType &x, const Scalar tol = std::sqrt(NumTraits::epsilon()) ); HybridNonLinearSolverSpace::Status solveNumericalDiffInit(FVectorType &x); HybridNonLinearSolverSpace::Status solveNumericalDiffOneStep(FVectorType &x); HybridNonLinearSolverSpace::Status solveNumericalDiff(FVectorType &x); void resetParameters(void) { parameters = Parameters(); } Parameters parameters; FVectorType fvec, qtf, diag; JacobianType fjac; UpperTriangularType R; Index nfev; Index njev; Index iter; Scalar fnorm; bool useExternalScaling; private: FunctorType &functor; Index n; Scalar sum; bool sing; Scalar temp; Scalar delta; bool jeval; Index ncsuc; Scalar ratio; Scalar pnorm, xnorm, fnorm1; Index nslow1, nslow2; Index ncfail; Scalar actred, prered; FVectorType wa1, wa2, wa3, wa4; HybridNonLinearSolver& operator=(const HybridNonLinearSolver&); }; template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::hybrj1( FVectorType &x, const Scalar tol ) { n = x.size(); /* check the input parameters for errors. */ if (n <= 0 || tol < 0.) return HybridNonLinearSolverSpace::ImproperInputParameters; resetParameters(); parameters.maxfev = 100*(n+1); parameters.xtol = tol; diag.setConstant(n, 1.); useExternalScaling = true; return solve(x); } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solveInit(FVectorType &x) { n = x.size(); wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n); fvec.resize(n); qtf.resize(n); fjac.resize(n, n); if (!useExternalScaling) diag.resize(n); eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'"); /* Function Body */ nfev = 0; njev = 0; /* check the input parameters for errors. */ if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0. ) return HybridNonLinearSolverSpace::ImproperInputParameters; if (useExternalScaling) for (Index j = 0; j < n; ++j) if (diag[j] <= 0.) return HybridNonLinearSolverSpace::ImproperInputParameters; /* evaluate the function at the starting point */ /* and calculate its norm. */ nfev = 1; if ( functor(x, fvec) < 0) return HybridNonLinearSolverSpace::UserAsked; fnorm = fvec.stableNorm(); /* initialize iteration counter and monitors. 
*/ iter = 1; ncsuc = 0; ncfail = 0; nslow1 = 0; nslow2 = 0; return HybridNonLinearSolverSpace::Running; } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solveOneStep(FVectorType &x) { using std::abs; eigen_assert(x.size()==n); // check the caller is not cheating us Index j; std::vector > v_givens(n), w_givens(n); jeval = true; /* calculate the jacobian matrix. */ if ( functor.df(x, fjac) < 0) return HybridNonLinearSolverSpace::UserAsked; ++njev; wa2 = fjac.colwise().blueNorm(); /* on the first iteration and if external scaling is not used, scale according */ /* to the norms of the columns of the initial jacobian. */ if (iter == 1) { if (!useExternalScaling) for (j = 0; j < n; ++j) diag[j] = (wa2[j]==0.) ? 1. : wa2[j]; /* on the first iteration, calculate the norm of the scaled x */ /* and initialize the step bound delta. */ xnorm = diag.cwiseProduct(x).stableNorm(); delta = parameters.factor * xnorm; if (delta == 0.) delta = parameters.factor; } /* compute the qr factorization of the jacobian. */ HouseholderQR qrfac(fjac); // no pivoting: /* copy the triangular factor of the qr factorization into r. */ R = qrfac.matrixQR(); /* accumulate the orthogonal factor in fjac. */ fjac = qrfac.householderQ(); /* form (q transpose)*fvec and store in qtf. */ qtf = fjac.transpose() * fvec; /* rescale if necessary. */ if (!useExternalScaling) diag = diag.cwiseMax(wa2); while (true) { /* determine the direction p. */ internal::dogleg(R, diag, qtf, delta, wa1); /* store the direction p and x + p. calculate the norm of p. */ wa1 = -wa1; wa2 = x + wa1; pnorm = diag.cwiseProduct(wa1).stableNorm(); /* on the first iteration, adjust the initial step bound. */ if (iter == 1) delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( functor(wa2, wa4) < 0) return HybridNonLinearSolverSpace::UserAsked; ++nfev; fnorm1 = wa4.stableNorm(); /* compute the scaled actual reduction. */ actred = -1.; if (fnorm1 < fnorm) /* Computing 2nd power */ actred = 1. - numext::abs2(fnorm1 / fnorm); /* compute the scaled predicted reduction. */ wa3 = R.template triangularView()*wa1 + qtf; temp = wa3.stableNorm(); prered = 0.; if (temp < fnorm) /* Computing 2nd power */ prered = 1. - numext::abs2(temp / fnorm); /* compute the ratio of the actual to the predicted reduction. */ ratio = 0.; if (prered > 0.) ratio = actred / prered; /* update the step bound. */ if (ratio < Scalar(.1)) { ncsuc = 0; ++ncfail; delta = Scalar(.5) * delta; } else { ncfail = 0; ++ncsuc; if (ratio >= Scalar(.5) || ncsuc > 1) delta = (std::max)(delta, pnorm / Scalar(.5)); if (abs(ratio - 1.) <= Scalar(.1)) { delta = pnorm / Scalar(.5); } } /* test for successful iteration. */ if (ratio >= Scalar(1e-4)) { /* successful iteration. update x, fvec, and their norms. */ x = wa2; wa2 = diag.cwiseProduct(x); fvec = wa4; xnorm = wa2.stableNorm(); fnorm = fnorm1; ++iter; } /* determine the progress of the iteration. */ ++nslow1; if (actred >= Scalar(.001)) nslow1 = 0; if (jeval) ++nslow2; if (actred >= Scalar(.1)) nslow2 = 0; /* test for convergence. */ if (delta <= parameters.xtol * xnorm || fnorm == 0.) return HybridNonLinearSolverSpace::RelativeErrorTooSmall; /* tests for termination and stringent tolerances. 
*/ if (nfev >= parameters.maxfev) return HybridNonLinearSolverSpace::TooManyFunctionEvaluation; if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits::epsilon() * xnorm) return HybridNonLinearSolverSpace::TolTooSmall; if (nslow2 == 5) return HybridNonLinearSolverSpace::NotMakingProgressJacobian; if (nslow1 == 10) return HybridNonLinearSolverSpace::NotMakingProgressIterations; /* criterion for recalculating jacobian. */ if (ncfail == 2) break; // leave inner loop and go for the next outer loop iteration /* calculate the rank one modification to the jacobian */ /* and update qtf if necessary. */ wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm ); wa2 = fjac.transpose() * wa4; if (ratio >= Scalar(1e-4)) qtf = wa2; wa2 = (wa2-wa3)/pnorm; /* compute the qr factorization of the updated jacobian. */ internal::r1updt(R, wa1, v_givens, w_givens, wa2, wa3, &sing); internal::r1mpyq(n, n, fjac.data(), v_givens, w_givens); internal::r1mpyq(1, n, qtf.data(), v_givens, w_givens); jeval = false; } return HybridNonLinearSolverSpace::Running; } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solve(FVectorType &x) { HybridNonLinearSolverSpace::Status status = solveInit(x); if (status==HybridNonLinearSolverSpace::ImproperInputParameters) return status; while (status==HybridNonLinearSolverSpace::Running) status = solveOneStep(x); return status; } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::hybrd1( FVectorType &x, const Scalar tol ) { n = x.size(); /* check the input parameters for errors. */ if (n <= 0 || tol < 0.) return HybridNonLinearSolverSpace::ImproperInputParameters; resetParameters(); parameters.maxfev = 200*(n+1); parameters.xtol = tol; diag.setConstant(n, 1.); useExternalScaling = true; return solveNumericalDiff(x); } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solveNumericalDiffInit(FVectorType &x) { n = x.size(); if (parameters.nb_of_subdiagonals<0) parameters.nb_of_subdiagonals= n-1; if (parameters.nb_of_superdiagonals<0) parameters.nb_of_superdiagonals= n-1; wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n); qtf.resize(n); fjac.resize(n, n); fvec.resize(n); if (!useExternalScaling) diag.resize(n); eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'"); /* Function Body */ nfev = 0; njev = 0; /* check the input parameters for errors. */ if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.nb_of_subdiagonals< 0 || parameters.nb_of_superdiagonals< 0 || parameters.factor <= 0. ) return HybridNonLinearSolverSpace::ImproperInputParameters; if (useExternalScaling) for (Index j = 0; j < n; ++j) if (diag[j] <= 0.) return HybridNonLinearSolverSpace::ImproperInputParameters; /* evaluate the function at the starting point */ /* and calculate its norm. */ nfev = 1; if ( functor(x, fvec) < 0) return HybridNonLinearSolverSpace::UserAsked; fnorm = fvec.stableNorm(); /* initialize iteration counter and monitors. 
*/ iter = 1; ncsuc = 0; ncfail = 0; nslow1 = 0; nslow2 = 0; return HybridNonLinearSolverSpace::Running; } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solveNumericalDiffOneStep(FVectorType &x) { using std::sqrt; using std::abs; assert(x.size()==n); // check the caller is not cheating us Index j; std::vector > v_givens(n), w_givens(n); jeval = true; if (parameters.nb_of_subdiagonals<0) parameters.nb_of_subdiagonals= n-1; if (parameters.nb_of_superdiagonals<0) parameters.nb_of_superdiagonals= n-1; /* calculate the jacobian matrix. */ if (internal::fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0) return HybridNonLinearSolverSpace::UserAsked; nfev += (std::min)(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n); wa2 = fjac.colwise().blueNorm(); /* on the first iteration and if external scaling is not used, scale according */ /* to the norms of the columns of the initial jacobian. */ if (iter == 1) { if (!useExternalScaling) for (j = 0; j < n; ++j) diag[j] = (wa2[j]==0.) ? 1. : wa2[j]; /* on the first iteration, calculate the norm of the scaled x */ /* and initialize the step bound delta. */ xnorm = diag.cwiseProduct(x).stableNorm(); delta = parameters.factor * xnorm; if (delta == 0.) delta = parameters.factor; } /* compute the qr factorization of the jacobian. */ HouseholderQR qrfac(fjac); // no pivoting: /* copy the triangular factor of the qr factorization into r. */ R = qrfac.matrixQR(); /* accumulate the orthogonal factor in fjac. */ fjac = qrfac.householderQ(); /* form (q transpose)*fvec and store in qtf. */ qtf = fjac.transpose() * fvec; /* rescale if necessary. */ if (!useExternalScaling) diag = diag.cwiseMax(wa2); while (true) { /* determine the direction p. */ internal::dogleg(R, diag, qtf, delta, wa1); /* store the direction p and x + p. calculate the norm of p. */ wa1 = -wa1; wa2 = x + wa1; pnorm = diag.cwiseProduct(wa1).stableNorm(); /* on the first iteration, adjust the initial step bound. */ if (iter == 1) delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( functor(wa2, wa4) < 0) return HybridNonLinearSolverSpace::UserAsked; ++nfev; fnorm1 = wa4.stableNorm(); /* compute the scaled actual reduction. */ actred = -1.; if (fnorm1 < fnorm) /* Computing 2nd power */ actred = 1. - numext::abs2(fnorm1 / fnorm); /* compute the scaled predicted reduction. */ wa3 = R.template triangularView()*wa1 + qtf; temp = wa3.stableNorm(); prered = 0.; if (temp < fnorm) /* Computing 2nd power */ prered = 1. - numext::abs2(temp / fnorm); /* compute the ratio of the actual to the predicted reduction. */ ratio = 0.; if (prered > 0.) ratio = actred / prered; /* update the step bound. */ if (ratio < Scalar(.1)) { ncsuc = 0; ++ncfail; delta = Scalar(.5) * delta; } else { ncfail = 0; ++ncsuc; if (ratio >= Scalar(.5) || ncsuc > 1) delta = (std::max)(delta, pnorm / Scalar(.5)); if (abs(ratio - 1.) <= Scalar(.1)) { delta = pnorm / Scalar(.5); } } /* test for successful iteration. */ if (ratio >= Scalar(1e-4)) { /* successful iteration. update x, fvec, and their norms. */ x = wa2; wa2 = diag.cwiseProduct(x); fvec = wa4; xnorm = wa2.stableNorm(); fnorm = fnorm1; ++iter; } /* determine the progress of the iteration. */ ++nslow1; if (actred >= Scalar(.001)) nslow1 = 0; if (jeval) ++nslow2; if (actred >= Scalar(.1)) nslow2 = 0; /* test for convergence. */ if (delta <= parameters.xtol * xnorm || fnorm == 0.) 
return HybridNonLinearSolverSpace::RelativeErrorTooSmall; /* tests for termination and stringent tolerances. */ if (nfev >= parameters.maxfev) return HybridNonLinearSolverSpace::TooManyFunctionEvaluation; if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits::epsilon() * xnorm) return HybridNonLinearSolverSpace::TolTooSmall; if (nslow2 == 5) return HybridNonLinearSolverSpace::NotMakingProgressJacobian; if (nslow1 == 10) return HybridNonLinearSolverSpace::NotMakingProgressIterations; /* criterion for recalculating jacobian. */ if (ncfail == 2) break; // leave inner loop and go for the next outer loop iteration /* calculate the rank one modification to the jacobian */ /* and update qtf if necessary. */ wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm ); wa2 = fjac.transpose() * wa4; if (ratio >= Scalar(1e-4)) qtf = wa2; wa2 = (wa2-wa3)/pnorm; /* compute the qr factorization of the updated jacobian. */ internal::r1updt(R, wa1, v_givens, w_givens, wa2, wa3, &sing); internal::r1mpyq(n, n, fjac.data(), v_givens, w_givens); internal::r1mpyq(1, n, qtf.data(), v_givens, w_givens); jeval = false; } return HybridNonLinearSolverSpace::Running; } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solveNumericalDiff(FVectorType &x) { HybridNonLinearSolverSpace::Status status = solveNumericalDiffInit(x); if (status==HybridNonLinearSolverSpace::ImproperInputParameters) return status; while (status==HybridNonLinearSolverSpace::Running) status = solveNumericalDiffOneStep(x); return status; } } // end namespace Eigen #endif // EIGEN_HYBRIDNONLINEARSOLVER_H //vim: ai ts=4 sts=4 et sw=4 RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/r1updt.h0000644000176200001440000000601212253717461026627 0ustar liggesusersnamespace Eigen { namespace internal { template void r1updt( Matrix< Scalar, Dynamic, Dynamic > &s, const Matrix< Scalar, Dynamic, 1> &u, std::vector > &v_givens, std::vector > &w_givens, Matrix< Scalar, Dynamic, 1> &v, Matrix< Scalar, Dynamic, 1> &w, bool *sing) { typedef DenseIndex Index; const JacobiRotation IdentityRotation = JacobiRotation(1,0); /* Local variables */ const Index m = s.rows(); const Index n = s.cols(); Index i, j=1; Scalar temp; JacobiRotation givens; // r1updt had a broader usecase, but we dont use it here. And, more // importantly, we can not test it. eigen_assert(m==n); eigen_assert(u.size()==m); eigen_assert(v.size()==n); eigen_assert(w.size()==n); /* move the nontrivial part of the last column of s into w. */ w[n-1] = s(n-1,n-1); /* rotate the vector v into a multiple of the n-th unit vector */ /* in such a way that a spike is introduced into w. */ for (j=n-2; j>=0; --j) { w[j] = 0.; if (v[j] != 0.) { /* determine a givens rotation which eliminates the */ /* j-th element of v. */ givens.makeGivens(-v[n-1], v[j]); /* apply the transformation to v and store the information */ /* necessary to recover the givens rotation. */ v[n-1] = givens.s() * v[j] + givens.c() * v[n-1]; v_givens[j] = givens; /* apply the transformation to s and extend the spike in w. */ for (i = j; i < m; ++i) { temp = givens.c() * s(j,i) - givens.s() * w[i]; w[i] = givens.s() * s(j,i) + givens.c() * w[i]; s(j,i) = temp; } } else v_givens[j] = IdentityRotation; } /* add the spike from the rank 1 update to w. */ w += v[n-1] * u; /* eliminate the spike. */ *sing = false; for (j = 0; j < n-1; ++j) { if (w[j] != 0.) { /* determine a givens rotation which eliminates the */ /* j-th element of the spike. 
*/ givens.makeGivens(-s(j,j), w[j]); /* apply the transformation to s and reduce the spike in w. */ for (i = j; i < m; ++i) { temp = givens.c() * s(j,i) + givens.s() * w[i]; w[i] = -givens.s() * s(j,i) + givens.c() * w[i]; s(j,i) = temp; } /* store the information necessary to recover the */ /* givens rotation. */ w_givens[j] = givens; } else v_givens[j] = IdentityRotation; /* test for zero diagonal elements in the output s. */ if (s(j,j) == 0.) { *sing = true; } } /* move w back into the last column of the output s. */ s(n-1,n-1) = w[n-1]; if (s(j,j) == 0.) { *sing = true; } return; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/lmpar.h0000644000176200001440000002162712253717461026534 0ustar liggesusersnamespace Eigen { namespace internal { template void lmpar( Matrix< Scalar, Dynamic, Dynamic > &r, const VectorXi &ipvt, const Matrix< Scalar, Dynamic, 1 > &diag, const Matrix< Scalar, Dynamic, 1 > &qtb, Scalar delta, Scalar &par, Matrix< Scalar, Dynamic, 1 > &x) { using std::abs; using std::sqrt; typedef DenseIndex Index; /* Local variables */ Index i, j, l; Scalar fp; Scalar parc, parl; Index iter; Scalar temp, paru; Scalar gnorm; Scalar dxnorm; /* Function Body */ const Scalar dwarf = (std::numeric_limits::min)(); const Index n = r.cols(); eigen_assert(n==diag.size()); eigen_assert(n==qtb.size()); eigen_assert(n==x.size()); Matrix< Scalar, Dynamic, 1 > wa1, wa2; /* compute and store in x the gauss-newton direction. if the */ /* jacobian is rank-deficient, obtain a least squares solution. */ Index nsing = n-1; wa1 = qtb; for (j = 0; j < n; ++j) { if (r(j,j) == 0. && nsing == n-1) nsing = j - 1; if (nsing < n-1) wa1[j] = 0.; } for (j = nsing; j>=0; --j) { wa1[j] /= r(j,j); temp = wa1[j]; for (i = 0; i < j ; ++i) wa1[i] -= r(i,j) * temp; } for (j = 0; j < n; ++j) x[ipvt[j]] = wa1[j]; /* initialize the iteration counter. */ /* evaluate the function at the origin, and test */ /* for acceptance of the gauss-newton direction. */ iter = 0; wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); fp = dxnorm - delta; if (fp <= Scalar(0.1) * delta) { par = 0; return; } /* if the jacobian is not rank deficient, the newton */ /* step provides a lower bound, parl, for the zero of */ /* the function. otherwise set this bound to zero. */ parl = 0.; if (nsing >= n-1) { for (j = 0; j < n; ++j) { l = ipvt[j]; wa1[j] = diag[l] * (wa2[l] / dxnorm); } // it's actually a triangularView.solveInplace(), though in a weird // way: for (j = 0; j < n; ++j) { Scalar sum = 0.; for (i = 0; i < j; ++i) sum += r(i,j) * wa1[i]; wa1[j] = (wa1[j] - sum) / r(j,j); } temp = wa1.blueNorm(); parl = fp / delta / temp / temp; } /* calculate an upper bound, paru, for the zero of the function. */ for (j = 0; j < n; ++j) wa1[j] = r.col(j).head(j+1).dot(qtb.head(j+1)) / diag[ipvt[j]]; gnorm = wa1.stableNorm(); paru = gnorm / delta; if (paru == 0.) paru = dwarf / (std::min)(delta,Scalar(0.1)); /* if the input par lies outside of the interval (parl,paru), */ /* set par to the closer endpoint. */ par = (std::max)(par,parl); par = (std::min)(par,paru); if (par == 0.) par = gnorm / dxnorm; /* beginning of an iteration. */ while (true) { ++iter; /* evaluate the function at the current value of par. */ if (par == 0.) 
par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */ wa1 = sqrt(par)* diag; Matrix< Scalar, Dynamic, 1 > sdiag(n); qrsolv(r, ipvt, wa1, qtb, x, sdiag); wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); temp = fp; fp = dxnorm - delta; /* if the function is small enough, accept the current value */ /* of par. also test for the exceptional cases where parl */ /* is zero or the number of iterations has reached 10. */ if (abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10) break; /* compute the newton correction. */ for (j = 0; j < n; ++j) { l = ipvt[j]; wa1[j] = diag[l] * (wa2[l] / dxnorm); } for (j = 0; j < n; ++j) { wa1[j] /= sdiag[j]; temp = wa1[j]; for (i = j+1; i < n; ++i) wa1[i] -= r(i,j) * temp; } temp = wa1.blueNorm(); parc = fp / delta / temp / temp; /* depending on the sign of the function, update parl or paru. */ if (fp > 0.) parl = (std::max)(parl,par); if (fp < 0.) paru = (std::min)(paru,par); /* compute an improved estimate for par. */ /* Computing MAX */ par = (std::max)(parl,par+parc); /* end of an iteration. */ } /* termination. */ if (iter == 0) par = 0.; return; } template void lmpar2( const ColPivHouseholderQR > &qr, const Matrix< Scalar, Dynamic, 1 > &diag, const Matrix< Scalar, Dynamic, 1 > &qtb, Scalar delta, Scalar &par, Matrix< Scalar, Dynamic, 1 > &x) { using std::sqrt; using std::abs; typedef DenseIndex Index; /* Local variables */ Index j; Scalar fp; Scalar parc, parl; Index iter; Scalar temp, paru; Scalar gnorm; Scalar dxnorm; /* Function Body */ const Scalar dwarf = (std::numeric_limits::min)(); const Index n = qr.matrixQR().cols(); eigen_assert(n==diag.size()); eigen_assert(n==qtb.size()); Matrix< Scalar, Dynamic, 1 > wa1, wa2; /* compute and store in x the gauss-newton direction. if the */ /* jacobian is rank-deficient, obtain a least squares solution. */ // const Index rank = qr.nonzeroPivots(); // exactly double(0.) const Index rank = qr.rank(); // use a threshold wa1 = qtb; wa1.tail(n-rank).setZero(); qr.matrixQR().topLeftCorner(rank, rank).template triangularView().solveInPlace(wa1.head(rank)); x = qr.colsPermutation()*wa1; /* initialize the iteration counter. */ /* evaluate the function at the origin, and test */ /* for acceptance of the gauss-newton direction. */ iter = 0; wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); fp = dxnorm - delta; if (fp <= Scalar(0.1) * delta) { par = 0; return; } /* if the jacobian is not rank deficient, the newton */ /* step provides a lower bound, parl, for the zero of */ /* the function. otherwise set this bound to zero. */ parl = 0.; if (rank==n) { wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm; qr.matrixQR().topLeftCorner(n, n).transpose().template triangularView().solveInPlace(wa1); temp = wa1.blueNorm(); parl = fp / delta / temp / temp; } /* calculate an upper bound, paru, for the zero of the function. */ for (j = 0; j < n; ++j) wa1[j] = qr.matrixQR().col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)]; gnorm = wa1.stableNorm(); paru = gnorm / delta; if (paru == 0.) paru = dwarf / (std::min)(delta,Scalar(0.1)); /* if the input par lies outside of the interval (parl,paru), */ /* set par to the closer endpoint. */ par = (std::max)(par,parl); par = (std::min)(par,paru); if (par == 0.) par = gnorm / dxnorm; /* beginning of an iteration. */ Matrix< Scalar, Dynamic, Dynamic > s = qr.matrixQR(); while (true) { ++iter; /* evaluate the function at the current value of par. */ if (par == 0.) 
par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */ wa1 = sqrt(par)* diag; Matrix< Scalar, Dynamic, 1 > sdiag(n); qrsolv(s, qr.colsPermutation().indices(), wa1, qtb, x, sdiag); wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); temp = fp; fp = dxnorm - delta; /* if the function is small enough, accept the current value */ /* of par. also test for the exceptional cases where parl */ /* is zero or the number of iterations has reached 10. */ if (abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10) break; /* compute the newton correction. */ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm); // we could almost use this here, but the diagonal is outside qr, in sdiag[] // qr.matrixQR().topLeftCorner(n, n).transpose().template triangularView().solveInPlace(wa1); for (j = 0; j < n; ++j) { wa1[j] /= sdiag[j]; temp = wa1[j]; for (Index i = j+1; i < n; ++i) wa1[i] -= s(i,j) * temp; } temp = wa1.blueNorm(); parc = fp / delta / temp / temp; /* depending on the sign of the function, update parl or paru. */ if (fp > 0.) parl = (std::max)(parl,par); if (fp < 0.) paru = (std::min)(paru,par); /* compute an improved estimate for par. */ par = (std::max)(parl,par+parc); } if (iter == 0) par = 0.; return; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/covar.h0000644000176200001440000000357312253717461026533 0ustar liggesusersnamespace Eigen { namespace internal { template void covar( Matrix< Scalar, Dynamic, Dynamic > &r, const VectorXi &ipvt, Scalar tol = std::sqrt(NumTraits::epsilon()) ) { using std::abs; typedef DenseIndex Index; /* Local variables */ Index i, j, k, l, ii, jj; bool sing; Scalar temp; /* Function Body */ const Index n = r.cols(); const Scalar tolr = tol * abs(r(0,0)); Matrix< Scalar, Dynamic, 1 > wa(n); eigen_assert(ipvt.size()==n); /* form the inverse of r in the full upper triangle of r. */ l = -1; for (k = 0; k < n; ++k) if (abs(r(k,k)) > tolr) { r(k,k) = 1. / r(k,k); for (j = 0; j <= k-1; ++j) { temp = r(k,k) * r(j,k); r(j,k) = 0.; r.col(k).head(j+1) -= r.col(j).head(j+1) * temp; } l = k; } /* form the full upper triangle of the inverse of (r transpose)*r */ /* in the full upper triangle of r. */ for (k = 0; k <= l; ++k) { for (j = 0; j <= k-1; ++j) r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k); r.col(k).head(k+1) *= r(k,k); } /* form the full lower triangle of the covariance matrix */ /* in the strict lower triangle of r and in wa. */ for (j = 0; j < n; ++j) { jj = ipvt[j]; sing = j > l; for (i = 0; i <= j; ++i) { if (sing) r(i,j) = 0.; ii = ipvt[i]; if (ii > jj) r(ii,jj) = r(i,j); if (ii < jj) r(jj,ii) = r(i,j); } wa[jj] = r(j,j); } /* symmetrize the covariance matrix in r. 
*/ r.topLeftCorner(n,n).template triangularView() = r.topLeftCorner(n,n).transpose(); r.diagonal() = wa; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h0000644000176200001440000000426112253717461026544 0ustar liggesusersnamespace Eigen { namespace internal { template DenseIndex fdjac1( const FunctorType &Functor, Matrix< Scalar, Dynamic, 1 > &x, Matrix< Scalar, Dynamic, 1 > &fvec, Matrix< Scalar, Dynamic, Dynamic > &fjac, DenseIndex ml, DenseIndex mu, Scalar epsfcn) { using std::sqrt; using std::abs; typedef DenseIndex Index; /* Local variables */ Scalar h; Index j, k; Scalar eps, temp; Index msum; int iflag; Index start, length; /* Function Body */ const Scalar epsmch = NumTraits::epsilon(); const Index n = x.size(); eigen_assert(fvec.size()==n); Matrix< Scalar, Dynamic, 1 > wa1(n); Matrix< Scalar, Dynamic, 1 > wa2(n); eps = sqrt((std::max)(epsfcn,epsmch)); msum = ml + mu + 1; if (msum >= n) { /* computation of dense approximate jacobian. */ for (j = 0; j < n; ++j) { temp = x[j]; h = eps * abs(temp); if (h == 0.) h = eps; x[j] = temp + h; iflag = Functor(x, wa1); if (iflag < 0) return iflag; x[j] = temp; fjac.col(j) = (wa1-fvec)/h; } }else { /* computation of banded approximate jacobian. */ for (k = 0; k < msum; ++k) { for (j = k; (msum<0) ? (j>n): (jn): (j(0,j-mu); length = (std::min)(n-1, j+ml) - start + 1; fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h; } } } return 0; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/Polynomials/0000755000176200001440000000000013100733213023233 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/Polynomials/Companion.h0000644000176200001440000001710112253717461025346 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Manuel Yguel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COMPANION_H #define EIGEN_COMPANION_H // This file requires the user to include // * Eigen/Core // * Eigen/src/PolynomialSolver.h namespace Eigen { namespace internal { #ifndef EIGEN_PARSED_BY_DOXYGEN template T radix(){ return 2; } template T radix2(){ return radix()*radix(); } template struct decrement_if_fixed_size { enum { ret = (Size == Dynamic) ? Dynamic : Size-1 }; }; #endif template< typename _Scalar, int _Deg > class companion { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? 
Dynamic : _Deg) enum { Deg = _Deg, Deg_1=decrement_if_fixed_size::ret }; typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef Matrix RightColumn; //typedef DiagonalMatrix< Scalar, Deg_1, Deg_1 > BottomLeftDiagonal; typedef Matrix BottomLeftDiagonal; typedef Matrix DenseCompanionMatrixType; typedef Matrix< Scalar, _Deg, Deg_1 > LeftBlock; typedef Matrix< Scalar, Deg_1, Deg_1 > BottomLeftBlock; typedef Matrix< Scalar, 1, Deg_1 > LeftBlockFirstRow; typedef DenseIndex Index; public: EIGEN_STRONG_INLINE const _Scalar operator()(Index row, Index col ) const { if( m_bl_diag.rows() > col ) { if( 0 < row ){ return m_bl_diag[col]; } else{ return 0; } } else{ return m_monic[row]; } } public: template void setPolynomial( const VectorType& poly ) { const Index deg = poly.size()-1; m_monic = -1/poly[deg] * poly.head(deg); //m_bl_diag.setIdentity( deg-1 ); m_bl_diag.setOnes(deg-1); } template companion( const VectorType& poly ){ setPolynomial( poly ); } public: DenseCompanionMatrixType denseMatrix() const { const Index deg = m_monic.size(); const Index deg_1 = deg-1; DenseCompanionMatrixType companion(deg,deg); companion << ( LeftBlock(deg,deg_1) << LeftBlockFirstRow::Zero(1,deg_1), BottomLeftBlock::Identity(deg-1,deg-1)*m_bl_diag.asDiagonal() ).finished() , m_monic; return companion; } protected: /** Helper function for the balancing algorithm. * \returns true if the row and the column, having colNorm and rowNorm * as norms, are balanced, false otherwise. * colB and rowB are repectively the multipliers for * the column and the row in order to balance them. * */ bool balanced( Scalar colNorm, Scalar rowNorm, bool& isBalanced, Scalar& colB, Scalar& rowB ); /** Helper function for the balancing algorithm. * \returns true if the row and the column, having colNorm and rowNorm * as norms, are balanced, false otherwise. * colB and rowB are repectively the multipliers for * the column and the row in order to balance them. * */ bool balancedR( Scalar colNorm, Scalar rowNorm, bool& isBalanced, Scalar& colB, Scalar& rowB ); public: /** * Balancing algorithm from B. N. PARLETT and C. REINSCH (1969) * "Balancing a matrix for calculation of eigenvalues and eigenvectors" * adapted to the case of companion matrices. * A matrix with non zero row and non zero column is balanced * for a certain norm if the i-th row and the i-th column * have same norm for all i. 
*/ void balance(); protected: RightColumn m_monic; BottomLeftDiagonal m_bl_diag; }; template< typename _Scalar, int _Deg > inline bool companion<_Scalar,_Deg>::balanced( Scalar colNorm, Scalar rowNorm, bool& isBalanced, Scalar& colB, Scalar& rowB ) { if( Scalar(0) == colNorm || Scalar(0) == rowNorm ){ return true; } else { //To find the balancing coefficients, if the radix is 2, //one finds \f$ \sigma \f$ such that // \f$ 2^{2\sigma-1} < rowNorm / colNorm \le 2^{2\sigma+1} \f$ // then the balancing coefficient for the row is \f$ 1/2^{\sigma} \f$ // and the balancing coefficient for the column is \f$ 2^{\sigma} \f$ rowB = rowNorm / radix(); colB = Scalar(1); const Scalar s = colNorm + rowNorm; while (colNorm < rowB) { colB *= radix(); colNorm *= radix2(); } rowB = rowNorm * radix(); while (colNorm >= rowB) { colB /= radix(); colNorm /= radix2(); } //This line is used to avoid insubstantial balancing if ((rowNorm + colNorm) < Scalar(0.95) * s * colB) { isBalanced = false; rowB = Scalar(1) / colB; return false; } else{ return true; } } } template< typename _Scalar, int _Deg > inline bool companion<_Scalar,_Deg>::balancedR( Scalar colNorm, Scalar rowNorm, bool& isBalanced, Scalar& colB, Scalar& rowB ) { if( Scalar(0) == colNorm || Scalar(0) == rowNorm ){ return true; } else { /** * Set the norm of the column and the row to the geometric mean * of the row and column norm */ const _Scalar q = colNorm/rowNorm; if( !isApprox( q, _Scalar(1) ) ) { rowB = sqrt( colNorm/rowNorm ); colB = Scalar(1)/rowB; isBalanced = false; return false; } else{ return true; } } } template< typename _Scalar, int _Deg > void companion<_Scalar,_Deg>::balance() { using std::abs; EIGEN_STATIC_ASSERT( Deg == Dynamic || 1 < Deg, YOU_MADE_A_PROGRAMMING_MISTAKE ); const Index deg = m_monic.size(); const Index deg_1 = deg-1; bool hasConverged=false; while( !hasConverged ) { hasConverged = true; Scalar colNorm,rowNorm; Scalar colB,rowB; //First row, first column excluding the diagonal //============================================== colNorm = abs(m_bl_diag[0]); rowNorm = abs(m_monic[0]); //Compute balancing of the row and the column if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) ) { m_bl_diag[0] *= colB; m_monic[0] *= rowB; } //Middle rows and columns excluding the diagonal //============================================== for( Index i=1; i headMonic( m_monic, 0, deg_1 ); colNorm = headMonic.array().abs().sum(); rowNorm = abs( m_bl_diag[ebl] ); //Compute balancing of the row and the column if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) ) { headMonic *= colB; m_bl_diag[ebl] *= rowB; } } } } // end namespace internal } // end namespace Eigen #endif // EIGEN_COMPANION_H RcppEigen/inst/include/unsupported/Eigen/src/Polynomials/PolynomialUtils.h0000644000176200001440000001137613100733213026560 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Manuel Yguel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_POLYNOMIAL_UTILS_H #define EIGEN_POLYNOMIAL_UTILS_H namespace Eigen { /** \ingroup Polynomials_Module * \returns the evaluation of the polynomial at x using Horner algorithm. * * \param[in] poly : the vector of coefficients of the polynomial ordered * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial * e.g. 
\f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$. * \param[in] x : the value to evaluate the polynomial at. * * Note for stability: *
\f$ |x| \le 1 \f$
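 *
 * A minimal usage sketch (the coefficient values below are made up for
 * illustration; poly_eval(), defined further down, falls back to this routine
 * whenever \f$ |x| \le 1 \f$):
 * \code
 * Eigen::Vector4d coeffs;
 * coeffs << 1.0, 0.0, 3.0, -2.0;                    // 1 + 3x^2 - 2x^3
 * double v = Eigen::poly_eval_horner(coeffs, 0.5);  // Horner evaluation at x = 0.5
 * \endcode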
*/ template inline T poly_eval_horner( const Polynomials& poly, const T& x ) { T val=poly[poly.size()-1]; for(DenseIndex i=poly.size()-2; i>=0; --i ){ val = val*x + poly[i]; } return val; } /** \ingroup Polynomials_Module * \returns the evaluation of the polynomial at x using stabilized Horner algorithm. * * \param[in] poly : the vector of coefficients of the polynomial ordered * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial * e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$. * \param[in] x : the value to evaluate the polynomial at. */ template inline T poly_eval( const Polynomials& poly, const T& x ) { typedef typename NumTraits::Real Real; if( numext::abs2( x ) <= Real(1) ){ return poly_eval_horner( poly, x ); } else { T val=poly[0]; T inv_x = T(1)/x; for( DenseIndex i=1; iPrecondition:
*
the leading coefficient of the input polynomial poly must be non zero
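 *
 * A small illustrative sketch (the coefficient values are hypothetical;
 * cauchy_min_bound(), defined just below, gives the matching lower bound):
 * \code
 * Eigen::Vector3d coeffs;
 * coeffs << -6.0, 1.0, 1.0;                      // -6 + x + x^2, roots 2 and -3
 * double ub = Eigen::cauchy_max_bound(coeffs);   // every root has modulus <= ub
 * double lb = Eigen::cauchy_min_bound(coeffs);   // every root has modulus >= lb
 * \endcode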
*/ template inline typename NumTraits::Real cauchy_max_bound( const Polynomial& poly ) { using std::abs; typedef typename Polynomial::Scalar Scalar; typedef typename NumTraits::Real Real; eigen_assert( Scalar(0) != poly[poly.size()-1] ); const Scalar inv_leading_coeff = Scalar(1)/poly[poly.size()-1]; Real cb(0); for( DenseIndex i=0; i inline typename NumTraits::Real cauchy_min_bound( const Polynomial& poly ) { using std::abs; typedef typename Polynomial::Scalar Scalar; typedef typename NumTraits::Real Real; DenseIndex i=0; while( i void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly ) { typedef typename Polynomial::Scalar Scalar; poly.setZero( rv.size()+1 ); poly[0] = -rv[0]; poly[1] = Scalar(1); for( DenseIndex i=1; i< rv.size(); ++i ) { for( DenseIndex j=i+1; j>0; --j ){ poly[j] = poly[j-1] - rv[i]*poly[j]; } poly[0] = -rv[i]*poly[0]; } } } // end namespace Eigen #endif // EIGEN_POLYNOMIAL_UTILS_H RcppEigen/inst/include/unsupported/Eigen/src/Polynomials/PolynomialSolver.h0000644000176200001440000003405013100733213026724 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Manuel Yguel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_POLYNOMIAL_SOLVER_H #define EIGEN_POLYNOMIAL_SOLVER_H namespace Eigen { /** \ingroup Polynomials_Module * \class PolynomialSolverBase. * * \brief Defined to be inherited by polynomial solvers: it provides * convenient methods such as * - real roots, * - greatest, smallest complex roots, * - real roots with greatest, smallest absolute real value, * - greatest, smallest real roots. * * It stores the set of roots as a vector of complexes. * */ template< typename _Scalar, int _Deg > class PolynomialSolverBase { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg) typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef std::complex RootType; typedef Matrix RootsType; typedef DenseIndex Index; protected: template< typename OtherPolynomial > inline void setPolynomial( const OtherPolynomial& poly ){ m_roots.resize(poly.size()-1); } public: template< typename OtherPolynomial > inline PolynomialSolverBase( const OtherPolynomial& poly ){ setPolynomial( poly() ); } inline PolynomialSolverBase(){} public: /** \returns the complex roots of the polynomial */ inline const RootsType& roots() const { return m_roots; } public: /** Clear and fills the back insertion sequence with the real roots of the polynomial * i.e. the real part of the complex roots that have an imaginary part which * absolute value is smaller than absImaginaryThreshold. * absImaginaryThreshold takes the dummy_precision associated * with the _Scalar template parameter of the PolynomialSolver class as the default value. * * \param[out] bi_seq : the back insertion sequence (stl concept) * \param[in] absImaginaryThreshold : the maximum bound of the imaginary part of a complex * number that is considered as real. 
* */ template inline void realRoots( Stl_back_insertion_sequence& bi_seq, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { using std::abs; bi_seq.clear(); for(Index i=0; i inline const RootType& selectComplexRoot_withRespectToNorm( squaredNormBinaryPredicate& pred ) const { Index res=0; RealScalar norm2 = numext::abs2( m_roots[0] ); for( Index i=1; i greater; return selectComplexRoot_withRespectToNorm( greater ); } /** * \returns the complex root with smallest norm. */ inline const RootType& smallestRoot() const { std::less less; return selectComplexRoot_withRespectToNorm( less ); } protected: template inline const RealScalar& selectRealRoot_withRespectToAbsRealPart( squaredRealPartBinaryPredicate& pred, bool& hasArealRoot, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { using std::abs; hasArealRoot = false; Index res=0; RealScalar abs2(0); for( Index i=0; i inline const RealScalar& selectRealRoot_withRespectToRealPart( RealPartBinaryPredicate& pred, bool& hasArealRoot, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { using std::abs; hasArealRoot = false; Index res=0; RealScalar val(0); for( Index i=0; i::dummy_precision() ) const { std::greater greater; return selectRealRoot_withRespectToAbsRealPart( greater, hasArealRoot, absImaginaryThreshold ); } /** * \returns a real root with smallest absolute magnitude. * A real root is defined as the real part of a complex root with absolute imaginary * part smallest than absImaginaryThreshold. * absImaginaryThreshold takes the dummy_precision associated * with the _Scalar template parameter of the PolynomialSolver class as the default value. * If no real root is found the boolean hasArealRoot is set to false and the real part of * the root with smallest absolute imaginary part is returned instead. * * \param[out] hasArealRoot : boolean true if a real root is found according to the * absImaginaryThreshold criterion, false otherwise. * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide * whether or not a root is real. */ inline const RealScalar& absSmallestRealRoot( bool& hasArealRoot, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { std::less less; return selectRealRoot_withRespectToAbsRealPart( less, hasArealRoot, absImaginaryThreshold ); } /** * \returns the real root with greatest value. * A real root is defined as the real part of a complex root with absolute imaginary * part smallest than absImaginaryThreshold. * absImaginaryThreshold takes the dummy_precision associated * with the _Scalar template parameter of the PolynomialSolver class as the default value. * If no real root is found the boolean hasArealRoot is set to false and the real part of * the root with smallest absolute imaginary part is returned instead. * * \param[out] hasArealRoot : boolean true if a real root is found according to the * absImaginaryThreshold criterion, false otherwise. * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide * whether or not a root is real. */ inline const RealScalar& greatestRealRoot( bool& hasArealRoot, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { std::greater greater; return selectRealRoot_withRespectToRealPart( greater, hasArealRoot, absImaginaryThreshold ); } /** * \returns the real root with smallest value. 
* A real root is defined as the real part of a complex root with absolute imaginary * part smallest than absImaginaryThreshold. * absImaginaryThreshold takes the dummy_precision associated * with the _Scalar template parameter of the PolynomialSolver class as the default value. * If no real root is found the boolean hasArealRoot is set to false and the real part of * the root with smallest absolute imaginary part is returned instead. * * \param[out] hasArealRoot : boolean true if a real root is found according to the * absImaginaryThreshold criterion, false otherwise. * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide * whether or not a root is real. */ inline const RealScalar& smallestRealRoot( bool& hasArealRoot, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { std::less less; return selectRealRoot_withRespectToRealPart( less, hasArealRoot, absImaginaryThreshold ); } protected: RootsType m_roots; }; #define EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( BASE ) \ typedef typename BASE::Scalar Scalar; \ typedef typename BASE::RealScalar RealScalar; \ typedef typename BASE::RootType RootType; \ typedef typename BASE::RootsType RootsType; /** \ingroup Polynomials_Module * * \class PolynomialSolver * * \brief A polynomial solver * * Computes the complex roots of a real polynomial. * * \param _Scalar the scalar type, i.e., the type of the polynomial coefficients * \param _Deg the degree of the polynomial, can be a compile time value or Dynamic. * Notice that the number of polynomial coefficients is _Deg+1. * * This class implements a polynomial solver and provides convenient methods such as * - real roots, * - greatest, smallest complex roots, * - real roots with greatest, smallest absolute real value. * - greatest, smallest real roots. * * WARNING: this polynomial solver is experimental, part of the unsupported Eigen modules. * * * Currently a QR algorithm is used to compute the eigenvalues of the companion matrix of * the polynomial to compute its roots. * This supposes that the complex moduli of the roots are all distinct: e.g. there should * be no multiple roots or conjugate roots for instance. * With 32bit (float) floating types this problem shows up frequently. * However, almost always, correct accuracy is reached even in these cases for 64bit * (double) floating types and small polynomial degree (<20). */ template< typename _Scalar, int _Deg > class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg> { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg) typedef PolynomialSolverBase<_Scalar,_Deg> PS_Base; EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base ) typedef Matrix CompanionMatrixType; typedef EigenSolver EigenSolverType; public: /** Computes the complex roots of a new polynomial. 
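 *
 * A minimal usage sketch (the coefficients are arbitrary and ordered by
 * increasing degree, as everywhere in this module; realRoots() is inherited
 * from the base class documented above):
 * \code
 * Eigen::Vector4d coeffs;
 * coeffs << -6.0, 11.0, -6.0, 1.0;                  // (x-1)(x-2)(x-3)
 * Eigen::PolynomialSolver<double, 3> solver(coeffs);
 * std::vector<double> re;
 * solver.realRoots(re);                             // 1, 2 and 3 up to round-off
 * \endcode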
*/ template< typename OtherPolynomial > void compute( const OtherPolynomial& poly ) { eigen_assert( Scalar(0) != poly[poly.size()-1] ); eigen_assert( poly.size() > 1 ); if(poly.size() > 2 ) { internal::companion companion( poly ); companion.balance(); m_eigenSolver.compute( companion.denseMatrix() ); m_roots = m_eigenSolver.eigenvalues(); } else if(poly.size () == 2) { m_roots.resize(1); m_roots[0] = -poly[0]/poly[1]; } } public: template< typename OtherPolynomial > inline PolynomialSolver( const OtherPolynomial& poly ){ compute( poly ); } inline PolynomialSolver(){} protected: using PS_Base::m_roots; EigenSolverType m_eigenSolver; }; template< typename _Scalar > class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1> { public: typedef PolynomialSolverBase<_Scalar,1> PS_Base; EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base ) public: /** Computes the complex roots of a new polynomial. */ template< typename OtherPolynomial > void compute( const OtherPolynomial& poly ) { eigen_assert( poly.size() == 2 ); eigen_assert( Scalar(0) != poly[1] ); m_roots[0] = -poly[0]/poly[1]; } public: template< typename OtherPolynomial > inline PolynomialSolver( const OtherPolynomial& poly ){ compute( poly ); } inline PolynomialSolver(){} protected: using PS_Base::m_roots; }; } // end namespace Eigen #endif // EIGEN_POLYNOMIAL_SOLVER_H RcppEigen/inst/include/unsupported/Eigen/src/FFT/0000755000176200001440000000000012253717461021363 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/FFT/ei_fftw_impl.h0000644000176200001440000002200612253717461024200 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Mark Borgerding mark a borgerding net // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. namespace Eigen { namespace internal { // FFTW uses non-const arguments // so we must use ugly const_cast calls for all the args it uses // // This should be safe as long as // 1. we use FFTW_ESTIMATE for all our planning // see the FFTW docs section 4.3.2 "Planner Flags" // 2. 
fftw_complex is compatible with std::complex // This assumes std::complex layout is array of size 2 with real,imag template inline T * fftw_cast(const T* p) { return const_cast( p); } inline fftw_complex * fftw_cast( const std::complex * p) { return const_cast( reinterpret_cast(p) ); } inline fftwf_complex * fftw_cast( const std::complex * p) { return const_cast( reinterpret_cast(p) ); } inline fftwl_complex * fftw_cast( const std::complex * p) { return const_cast( reinterpret_cast(p) ); } template struct fftw_plan {}; template <> struct fftw_plan { typedef float scalar_type; typedef fftwf_complex complex_type; fftwf_plan m_plan; fftw_plan() :m_plan(NULL) {} ~fftw_plan() {if (m_plan) fftwf_destroy_plan(m_plan);} inline void fwd(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft( m_plan, src,dst); } inline void inv(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft( m_plan, src,dst); } inline void fwd(complex_type * dst,scalar_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwf_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft_r2c( m_plan,src,dst); } inline void inv(scalar_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwf_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft_c2r( m_plan, src,dst); } inline void fwd2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft( m_plan, src,dst); } inline void inv2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft( m_plan, src,dst); } }; template <> struct fftw_plan { typedef double scalar_type; typedef fftw_complex complex_type; ::fftw_plan m_plan; fftw_plan() :m_plan(NULL) {} ~fftw_plan() {if (m_plan) fftw_destroy_plan(m_plan);} inline void fwd(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft( m_plan, src,dst); } inline void inv(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft( m_plan, src,dst); } inline void fwd(complex_type * dst,scalar_type * src,int nfft) { if (m_plan==NULL) m_plan = fftw_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft_r2c( m_plan,src,dst); } inline void inv(scalar_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftw_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft_c2r( m_plan, src,dst); } inline void fwd2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft( m_plan, src,dst); } inline void inv2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft( m_plan, src,dst); } }; template <> struct fftw_plan { typedef long double scalar_type; typedef 
fftwl_complex complex_type; fftwl_plan m_plan; fftw_plan() :m_plan(NULL) {} ~fftw_plan() {if (m_plan) fftwl_destroy_plan(m_plan);} inline void fwd(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft( m_plan, src,dst); } inline void inv(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft( m_plan, src,dst); } inline void fwd(complex_type * dst,scalar_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwl_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft_r2c( m_plan,src,dst); } inline void inv(scalar_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwl_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft_c2r( m_plan, src,dst); } inline void fwd2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft( m_plan, src,dst); } inline void inv2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft( m_plan, src,dst); } }; template struct fftw_impl { typedef _Scalar Scalar; typedef std::complex Complex; inline void clear() { m_plans.clear(); } // complex-to-complex forward FFT inline void fwd( Complex * dst,const Complex *src,int nfft) { get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src),nfft ); } // real-to-complex forward FFT inline void fwd( Complex * dst,const Scalar * src,int nfft) { get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src) ,nfft); } // 2-d complex-to-complex inline void fwd2(Complex * dst, const Complex * src, int n0,int n1) { get_plan(n0,n1,false,dst,src).fwd2(fftw_cast(dst), fftw_cast(src) ,n0,n1); } // inverse complex-to-complex inline void inv(Complex * dst,const Complex *src,int nfft) { get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft ); } // half-complex to scalar inline void inv( Scalar * dst,const Complex * src,int nfft) { get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft ); } // 2-d complex-to-complex inline void inv2(Complex * dst, const Complex * src, int n0,int n1) { get_plan(n0,n1,true,dst,src).inv2(fftw_cast(dst), fftw_cast(src) ,n0,n1); } protected: typedef fftw_plan PlanData; typedef std::map PlanMap; PlanMap m_plans; inline PlanData & get_plan(int nfft,bool inverse,void * dst,const void * src) { bool inplace = (dst==src); bool aligned = ( (reinterpret_cast(src)&15) | (reinterpret_cast(dst)&15) ) == 0; int64_t key = ( (nfft<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1; return m_plans[key]; } inline PlanData & get_plan(int n0,int n1,bool inverse,void * dst,const void * src) { bool inplace = (dst==src); bool aligned = ( (reinterpret_cast(src)&15) | (reinterpret_cast(dst)&15) ) == 0; int64_t key = ( ( (((int64_t)n0) << 30)|(n1<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1 ) + 1; return m_plans[key]; } }; } // end namespace internal } // end namespace Eigen /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/unsupported/Eigen/src/FFT/ei_kissfft_impl.h0000644000176200001440000002776312253717461024722 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear 
algebra. // // Copyright (C) 2009 Mark Borgerding mark a borgerding net // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. namespace Eigen { namespace internal { // This FFT implementation was derived from kissfft http:sourceforge.net/projects/kissfft // Copyright 2003-2009 Mark Borgerding template struct kiss_cpx_fft { typedef _Scalar Scalar; typedef std::complex Complex; std::vector m_twiddles; std::vector m_stageRadix; std::vector m_stageRemainder; std::vector m_scratchBuf; bool m_inverse; inline void make_twiddles(int nfft,bool inverse) { using std::acos; m_inverse = inverse; m_twiddles.resize(nfft); Scalar phinc = (inverse?2:-2)* acos( (Scalar) -1) / nfft; for (int i=0;in) p=n;// impossible to have a factor > sqrt(n) } n /= p; m_stageRadix.push_back(p); m_stageRemainder.push_back(n); if ( p > 5 ) m_scratchBuf.resize(p); // scratchbuf will be needed in bfly_generic }while(n>1); } template inline void work( int stage,Complex * xout, const _Src * xin, size_t fstride,size_t in_stride) { int p = m_stageRadix[stage]; int m = m_stageRemainder[stage]; Complex * Fout_beg = xout; Complex * Fout_end = xout + p*m; if (m>1) { do{ // recursive call: // DFT of size m*p performed by doing // p instances of smaller DFTs of size m, // each one takes a decimated version of the input work(stage+1, xout , xin, fstride*p,in_stride); xin += fstride*in_stride; }while( (xout += m) != Fout_end ); }else{ do{ *xout = *xin; xin += fstride*in_stride; }while(++xout != Fout_end ); } xout=Fout_beg; // recombine the p smaller DFTs switch (p) { case 2: bfly2(xout,fstride,m); break; case 3: bfly3(xout,fstride,m); break; case 4: bfly4(xout,fstride,m); break; case 5: bfly5(xout,fstride,m); break; default: bfly_generic(xout,fstride,m,p); break; } } inline void bfly2( Complex * Fout, const size_t fstride, int m) { for (int k=0;kreal() - Scalar(.5)*scratch[3].real() , Fout->imag() - Scalar(.5)*scratch[3].imag() ); scratch[0] *= epi3.imag(); *Fout += scratch[3]; Fout[m2] = Complex( Fout[m].real() + scratch[0].imag() , Fout[m].imag() - scratch[0].real() ); Fout[m] += Complex( -scratch[0].imag(),scratch[0].real() ); ++Fout; }while(--k); } inline void bfly5( Complex * Fout, const size_t fstride, const size_t m) { Complex *Fout0,*Fout1,*Fout2,*Fout3,*Fout4; size_t u; Complex scratch[13]; Complex * twiddles = &m_twiddles[0]; Complex *tw; Complex ya,yb; ya = twiddles[fstride*m]; yb = twiddles[fstride*2*m]; Fout0=Fout; Fout1=Fout0+m; Fout2=Fout0+2*m; Fout3=Fout0+3*m; Fout4=Fout0+4*m; tw=twiddles; for ( u=0; u(m_twiddles.size()); Complex * scratchbuf = &m_scratchBuf[0]; for ( u=0; u(fstride) * k; if (twidx>=Norig) twidx-=Norig; t=scratchbuf[q] * twiddles[twidx]; Fout[ k ] += t; } k += m; } } } }; template struct kissfft_impl { typedef _Scalar Scalar; typedef std::complex Complex; void clear() { m_plans.clear(); m_realTwiddles.clear(); } inline void fwd( Complex * dst,const Complex *src,int nfft) { get_plan(nfft,false).work(0, dst, src, 1,1); } inline void fwd2( Complex * dst,const Complex *src,int n0,int n1) { EIGEN_UNUSED_VARIABLE(dst); EIGEN_UNUSED_VARIABLE(src); EIGEN_UNUSED_VARIABLE(n0); EIGEN_UNUSED_VARIABLE(n1); } inline void inv2( Complex * dst,const Complex *src,int n0,int n1) { EIGEN_UNUSED_VARIABLE(dst); EIGEN_UNUSED_VARIABLE(src); EIGEN_UNUSED_VARIABLE(n0); EIGEN_UNUSED_VARIABLE(n1); } // real-to-complex forward FFT // perform two FFTs of src even and src odd // then 
twiddle to recombine them into the half-spectrum format // then fill in the conjugate symmetric half inline void fwd( Complex * dst,const Scalar * src,int nfft) { if ( nfft&3 ) { // use generic mode for odd m_tmpBuf1.resize(nfft); get_plan(nfft,false).work(0, &m_tmpBuf1[0], src, 1,1); std::copy(m_tmpBuf1.begin(),m_tmpBuf1.begin()+(nfft>>1)+1,dst ); }else{ int ncfft = nfft>>1; int ncfft2 = nfft>>2; Complex * rtw = real_twiddles(ncfft2); // use optimized mode for even real fwd( dst, reinterpret_cast (src), ncfft); Complex dc = dst[0].real() + dst[0].imag(); Complex nyquist = dst[0].real() - dst[0].imag(); int k; for ( k=1;k <= ncfft2 ; ++k ) { Complex fpk = dst[k]; Complex fpnk = conj(dst[ncfft-k]); Complex f1k = fpk + fpnk; Complex f2k = fpk - fpnk; Complex tw= f2k * rtw[k-1]; dst[k] = (f1k + tw) * Scalar(.5); dst[ncfft-k] = conj(f1k -tw)*Scalar(.5); } dst[0] = dc; dst[ncfft] = nyquist; } } // inverse complex-to-complex inline void inv(Complex * dst,const Complex *src,int nfft) { get_plan(nfft,true).work(0, dst, src, 1,1); } // half-complex to scalar inline void inv( Scalar * dst,const Complex * src,int nfft) { if (nfft&3) { m_tmpBuf1.resize(nfft); m_tmpBuf2.resize(nfft); std::copy(src,src+(nfft>>1)+1,m_tmpBuf1.begin() ); for (int k=1;k<(nfft>>1)+1;++k) m_tmpBuf1[nfft-k] = conj(m_tmpBuf1[k]); inv(&m_tmpBuf2[0],&m_tmpBuf1[0],nfft); for (int k=0;k>1; int ncfft2 = nfft>>2; Complex * rtw = real_twiddles(ncfft2); m_tmpBuf1.resize(ncfft); m_tmpBuf1[0] = Complex( src[0].real() + src[ncfft].real(), src[0].real() - src[ncfft].real() ); for (int k = 1; k <= ncfft / 2; ++k) { Complex fk = src[k]; Complex fnkc = conj(src[ncfft-k]); Complex fek = fk + fnkc; Complex tmp = fk - fnkc; Complex fok = tmp * conj(rtw[k-1]); m_tmpBuf1[k] = fek + fok; m_tmpBuf1[ncfft-k] = conj(fek - fok); } get_plan(ncfft,true).work(0, reinterpret_cast(dst), &m_tmpBuf1[0], 1,1); } } protected: typedef kiss_cpx_fft PlanData; typedef std::map PlanMap; PlanMap m_plans; std::map > m_realTwiddles; std::vector m_tmpBuf1; std::vector m_tmpBuf2; inline int PlanKey(int nfft, bool isinverse) const { return (nfft<<1) | int(isinverse); } inline PlanData & get_plan(int nfft, bool inverse) { // TODO look for PlanKey(nfft, ! inverse) and conjugate the twiddles PlanData & pd = m_plans[ PlanKey(nfft,inverse) ]; if ( pd.m_twiddles.size() == 0 ) { pd.make_twiddles(nfft,inverse); pd.factorize(nfft); } return pd; } inline Complex * real_twiddles(int ncfft2) { using std::acos; std::vector & twidref = m_realTwiddles[ncfft2];// creates new if not there if ( (int)twidref.size() != ncfft2 ) { twidref.resize(ncfft2); int ncfft= ncfft2<<1; Scalar pi = acos( Scalar(-1) ); for (int k=1;k<=ncfft2;++k) twidref[k-1] = exp( Complex(0,-pi * (Scalar(k) / ncfft + Scalar(.5)) ) ); } return &twidref[0]; } }; } // end namespace internal } // end namespace Eigen /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/0000755000176200001440000000000013115262204023171 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/RandomSetter.h0000644000176200001440000002701413100733213025752 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
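// A minimal sketch of the user-facing FFT front-end that dispatches to the two
// backends above (typically the bundled kissfft code, or FFTW when
// EIGEN_FFTW_DEFAULT is defined). It assumes the unsupported <Eigen/FFT> header,
// which is not part of this excerpt, and uses arbitrary input data.
#include <unsupported/Eigen/FFT>
#include <vector>
#include <complex>

void fft_roundtrip_example()
{
  Eigen::FFT<float> fft;
  std::vector<float> timevec(64, 1.0f);               // real input samples
  std::vector<std::complex<float> > freqvec;

  fft.fwd(freqvec, timevec);                          // real-to-complex forward FFT
  std::vector<float> roundtrip;
  fft.inv(roundtrip, freqvec);                        // half-spectrum back to real samples
}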
#ifndef EIGEN_RANDOMSETTER_H #define EIGEN_RANDOMSETTER_H namespace Eigen { /** Represents a std::map * * \see RandomSetter */ template struct StdMapTraits { typedef int KeyType; typedef std::map Type; enum { IsSorted = 1 }; static void setInvalidKey(Type&, const KeyType&) {} }; #ifdef EIGEN_UNORDERED_MAP_SUPPORT /** Represents a std::unordered_map * * To use it you need to both define EIGEN_UNORDERED_MAP_SUPPORT and include the unordered_map header file * yourself making sure that unordered_map is defined in the std namespace. * * For instance, with current version of gcc you can either enable C++0x standard (-std=c++0x) or do: * \code * #include * #define EIGEN_UNORDERED_MAP_SUPPORT * namespace std { * using std::tr1::unordered_map; * } * \endcode * * \see RandomSetter */ template struct StdUnorderedMapTraits { typedef int KeyType; typedef std::unordered_map Type; enum { IsSorted = 0 }; static void setInvalidKey(Type&, const KeyType&) {} }; #endif // EIGEN_UNORDERED_MAP_SUPPORT #ifdef _DENSE_HASH_MAP_H_ /** Represents a google::dense_hash_map * * \see RandomSetter */ template struct GoogleDenseHashMapTraits { typedef int KeyType; typedef google::dense_hash_map Type; enum { IsSorted = 0 }; static void setInvalidKey(Type& map, const KeyType& k) { map.set_empty_key(k); } }; #endif #ifdef _SPARSE_HASH_MAP_H_ /** Represents a google::sparse_hash_map * * \see RandomSetter */ template struct GoogleSparseHashMapTraits { typedef int KeyType; typedef google::sparse_hash_map Type; enum { IsSorted = 0 }; static void setInvalidKey(Type&, const KeyType&) {} }; #endif /** \class RandomSetter * * \brief The RandomSetter is a wrapper object allowing to set/update a sparse matrix with random access * * \tparam SparseMatrixType the type of the sparse matrix we are updating * \tparam MapTraits a traits class representing the map implementation used for the temporary sparse storage. * Its default value depends on the system. * \tparam OuterPacketBits defines the number of rows (or columns) manage by a single map object * as a power of two exponent. * * This class temporarily represents a sparse matrix object using a generic map implementation allowing for * efficient random access. The conversion from the compressed representation to a hash_map object is performed * in the RandomSetter constructor, while the sparse matrix is updated back at destruction time. This strategy * suggest the use of nested blocks as in this example: * * \code * SparseMatrix m(rows,cols); * { * RandomSetter > w(m); * // don't use m but w instead with read/write random access to the coefficients: * for(;;) * w(rand(),rand()) = rand; * } * // when w is deleted, the data are copied back to m * // and m is ready to use. * \endcode * * Since hash_map objects are not fully sorted, representing a full matrix as a single hash_map would * involve a big and costly sort to update the compressed matrix back. To overcome this issue, a RandomSetter * use multiple hash_map, each representing 2^OuterPacketBits columns or rows according to the storage order. * To reach optimal performance, this value should be adjusted according to the average number of nonzeros * per rows/columns. * * The possible values for the template parameter MapTraits are: * - \b StdMapTraits: corresponds to std::map. 
(does not perform very well) * - \b GnuHashMapTraits: corresponds to __gnu_cxx::hash_map (available only with GCC) * - \b GoogleDenseHashMapTraits: corresponds to google::dense_hash_map (best efficiency, reasonable memory consumption) * - \b GoogleSparseHashMapTraits: corresponds to google::sparse_hash_map (best memory consumption, relatively good performance) * * The default map implementation depends on the availability, and the preferred order is: * GoogleSparseHashMapTraits, GnuHashMapTraits, and finally StdMapTraits. * * For performance and memory consumption reasons it is highly recommended to use one of * the Google's hash_map implementation. To enable the support for them, you have two options: * - \#include yourself \b before Eigen/Sparse header * - define EIGEN_GOOGLEHASH_SUPPORT * In the later case the inclusion of is made for you. * * \see http://code.google.com/p/google-sparsehash/ */ template class MapTraits = #if defined _DENSE_HASH_MAP_H_ GoogleDenseHashMapTraits #elif defined _HASH_MAP GnuHashMapTraits #else StdMapTraits #endif ,int OuterPacketBits = 6> class RandomSetter { typedef typename SparseMatrixType::Scalar Scalar; typedef typename SparseMatrixType::StorageIndex StorageIndex; struct ScalarWrapper { ScalarWrapper() : value(0) {} Scalar value; }; typedef typename MapTraits::KeyType KeyType; typedef typename MapTraits::Type HashMapType; static const int OuterPacketMask = (1 << OuterPacketBits) - 1; enum { SwapStorage = 1 - MapTraits::IsSorted, TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0, SetterRowMajor = SwapStorage ? 1-TargetRowMajor : TargetRowMajor }; public: /** Constructs a random setter object from the sparse matrix \a target * * Note that the initial value of \a target are imported. If you want to re-set * a sparse matrix from scratch, then you must set it to zero first using the * setZero() function. */ inline RandomSetter(SparseMatrixType& target) : mp_target(&target) { const Index outerSize = SwapStorage ? target.innerSize() : target.outerSize(); const Index innerSize = SwapStorage ? 
target.outerSize() : target.innerSize(); m_outerPackets = outerSize >> OuterPacketBits; if (outerSize&OuterPacketMask) m_outerPackets += 1; m_hashmaps = new HashMapType[m_outerPackets]; // compute number of bits needed to store inner indices Index aux = innerSize - 1; m_keyBitsOffset = 0; while (aux) { ++m_keyBitsOffset; aux = aux >> 1; } KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset)); for (Index k=0; k::setInvalidKey(m_hashmaps[k],ik); // insert current coeffs for (Index j=0; jouterSize(); ++j) for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it) (*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value(); } /** Destructor updating back the sparse matrix target */ ~RandomSetter() { KeyType keyBitsMask = (1<setZero(); mp_target->makeCompressed(); mp_target->reserve(nonZeros()); Index prevOuter = -1; for (Index k=0; kfirst >> m_keyBitsOffset) + outerOffset; const Index inner = it->first & keyBitsMask; if (prevOuter!=outer) { for (Index j=prevOuter+1;j<=outer;++j) mp_target->startVec(j); prevOuter = outer; } mp_target->insertBackByOuterInner(outer, inner) = it->second.value; } } mp_target->finalize(); } else { VectorXi positions(mp_target->outerSize()); positions.setZero(); // pass 1 for (Index k=0; kfirst & keyBitsMask; ++positions[outer]; } } // prefix sum Index count = 0; for (Index j=0; jouterSize(); ++j) { Index tmp = positions[j]; mp_target->outerIndexPtr()[j] = count; positions[j] = count; count += tmp; } mp_target->makeCompressed(); mp_target->outerIndexPtr()[mp_target->outerSize()] = count; mp_target->resizeNonZeros(count); // pass 2 for (Index k=0; kfirst >> m_keyBitsOffset) + outerOffset; const Index outer = it->first & keyBitsMask; // sorted insertion // Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients, // moreover those 2^OuterPacketBits coeffs are likely to be sparse, an so only a // small fraction of them have to be sorted, whence the following simple procedure: Index posStart = mp_target->outerIndexPtr()[outer]; Index i = (positions[outer]++) - 1; while ( (i >= posStart) && (mp_target->innerIndexPtr()[i] > inner) ) { mp_target->valuePtr()[i+1] = mp_target->valuePtr()[i]; mp_target->innerIndexPtr()[i+1] = mp_target->innerIndexPtr()[i]; --i; } mp_target->innerIndexPtr()[i+1] = inner; mp_target->valuePtr()[i+1] = it->second.value; } } } delete[] m_hashmaps; } /** \returns a reference to the coefficient at given coordinates \a row, \a col */ Scalar& operator() (Index row, Index col) { const Index outer = SetterRowMajor ? row : col; const Index inner = SetterRowMajor ? col : row; const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map const Index outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet const KeyType key = internal::convert_index((outerMinor<(m_hashmaps[k].size()); return nz; } protected: HashMapType* m_hashmaps; SparseMatrixType* mp_target; Index m_outerPackets; unsigned char m_keyBitsOffset; }; } // end namespace Eigen #endif // EIGEN_RANDOMSETTER_H RcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h0000644000176200001440000003177613100733213027304 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
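// A short self-contained sketch of the RandomSetter defined above: writes are
// buffered in hash maps with random access and flushed back into the sparse
// matrix when the setter goes out of scope. Indices and values are arbitrary.
#include <unsupported/Eigen/SparseExtra>

void random_setter_example()
{
  Eigen::SparseMatrix<double> m(100, 100);
  {
    Eigen::RandomSetter<Eigen::SparseMatrix<double> > setter(m);
    setter(3, 7)  = 1.0;      // random-access write
    setter(42, 1) = 2.5;
    setter(3, 7) += 4.0;      // read/write updates are allowed as well
  }                           // ~RandomSetter() copies the data back into m
  // m now holds the two nonzero entries set above
}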
#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H #define EIGEN_DYNAMIC_SPARSEMATRIX_H namespace Eigen { /** \deprecated use a SparseMatrix in an uncompressed mode * * \class DynamicSparseMatrix * * \brief A sparse matrix class designed for matrix assembly purpose * * \param _Scalar the scalar type, i.e. the type of the coefficients * * Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows * random read/write accesses in log(rho*outer_size) where \c rho is the probability that a coefficient is * nonzero and outer_size is the number of columns if the matrix is column-major and the number of rows * otherwise. * * Internally, the data are stored as a std::vector of compressed vector. The performances of random writes might * decrease as the number of nonzeros per inner-vector increase. In practice, we observed very good performance * till about 100 nonzeros/vector, and the performance remains relatively good till 500 nonzeros/vectors. * * \see SparseMatrix */ namespace internal { template struct traits > { typedef _Scalar Scalar; typedef _StorageIndex StorageIndex; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = Dynamic, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic, Flags = _Options | NestByRefBit | LvalueBit, CoeffReadCost = NumTraits::ReadCost, SupportedAccessPatterns = OuterRandomAccessPattern }; }; } template class DynamicSparseMatrix : public SparseMatrixBase > { typedef SparseMatrixBase Base; using Base::convert_index; public: EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix) // FIXME: why are these operator already alvailable ??? // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=) // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=) typedef MappedSparseMatrix Map; using Base::IsRowMajor; using Base::operator=; enum { Options = _Options }; protected: typedef DynamicSparseMatrix TransposedSparseMatrix; Index m_innerSize; std::vector > m_data; public: inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; } inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); } inline Index innerSize() const { return m_innerSize; } inline Index outerSize() const { return convert_index(m_data.size()); } inline Index innerNonZeros(Index j) const { return m_data[j].size(); } std::vector >& _data() { return m_data; } const std::vector >& _data() const { return m_data; } /** \returns the coefficient value at given position \a row, \a col * This operation involes a log(rho*outer_size) binary search. */ inline Scalar coeff(Index row, Index col) const { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; return m_data[outer].at(inner); } /** \returns a reference to the coefficient value at given position \a row, \a col * This operation involes a log(rho*outer_size) binary search. If the coefficient does not * exist yet, then a sorted insertion into a sequential buffer is performed. */ inline Scalar& coeffRef(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; return m_data[outer].atWithInsertion(inner); } class InnerIterator; class ReverseInnerIterator; void setZero() { for (Index j=0; j0) { Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4)); for (Index j=0; j(m_data[outer].size()) - 1; m_data[outer].resize(id+2,1); while ( (id >= startId) && (m_data[outer].index(id) > inner) ) { m_data[outer].index(id+1) = m_data[outer].index(id); m_data[outer].value(id+1) = m_data[outer].value(id); --id; } m_data[outer].index(id+1) = inner; m_data[outer].value(id+1) = 0; return m_data[outer].value(id+1); } /** Does nothing: provided for compatibility with SparseMatrix */ inline void finalize() {} /** Suppress all nonzeros which are smaller than \a reference under the tolerence \a epsilon */ void prune(Scalar reference, RealScalar epsilon = NumTraits::dummy_precision()) { for (Index j=0; jinnerSize) { // remove all coefficients with innerCoord>=innerSize // TODO //std::cerr << "not implemented yet\n"; exit(2); } if (m_data.size() != outerSize) { m_data.resize(outerSize); } } /** The class DynamicSparseMatrix is deprectaed */ EIGEN_DEPRECATED inline DynamicSparseMatrix() : m_innerSize(0), m_data(0) { eigen_assert(innerSize()==0 && outerSize()==0); } /** The class DynamicSparseMatrix is deprectaed */ EIGEN_DEPRECATED inline DynamicSparseMatrix(Index rows, Index cols) : m_innerSize(0) { resize(rows, cols); } /** The class DynamicSparseMatrix is deprectaed */ template EIGEN_DEPRECATED explicit inline DynamicSparseMatrix(const SparseMatrixBase& other) : m_innerSize(0) { Base::operator=(other.derived()); } inline DynamicSparseMatrix(const DynamicSparseMatrix& other) : Base(), m_innerSize(0) { *this = other.derived(); } inline void swap(DynamicSparseMatrix& other) { //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n"); std::swap(m_innerSize, other.m_innerSize); //std::swap(m_outerSize, other.m_outerSize); m_data.swap(other.m_data); } inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other) { if (other.isRValue()) { swap(other.const_cast_derived()); } else { resize(other.rows(), other.cols()); m_data = other.m_data; } return *this; } /** Destructor */ inline ~DynamicSparseMatrix() {} public: /** \deprecated * Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */ EIGEN_DEPRECATED void startFill(Index reserveSize = 1000) { setZero(); reserve(reserveSize); } /** \deprecated use insert() * inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that: * 1 - the coefficient does not exist yet * 2 - this the coefficient with greater inner coordinate for the given outer coordinate. * In other words, assuming \c *this is column-major, then there must not exists any nonzero coefficient of coordinates * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid. * * \see fillrand(), coeffRef() */ EIGEN_DEPRECATED Scalar& fill(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; return insertBack(outer,inner); } /** \deprecated use insert() * Like fill() but with random inner coordinates. * Compared to the generic coeffRef(), the unique limitation is that we assume * the coefficient does not exist yet. */ EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col) { return insert(row,col); } /** \deprecated use finalize() * Does nothing. Provided for compatibility with SparseMatrix. 
*/ EIGEN_DEPRECATED void endFill() {} # ifdef EIGEN_DYNAMICSPARSEMATRIX_PLUGIN # include EIGEN_DYNAMICSPARSEMATRIX_PLUGIN # endif }; template class DynamicSparseMatrix::InnerIterator : public SparseVector::InnerIterator { typedef typename SparseVector::InnerIterator Base; public: InnerIterator(const DynamicSparseMatrix& mat, Index outer) : Base(mat.m_data[outer]), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : Base::index(); } inline Index col() const { return IsRowMajor ? Base::index() : m_outer; } inline Index outer() const { return m_outer; } protected: const Index m_outer; }; template class DynamicSparseMatrix::ReverseInnerIterator : public SparseVector::ReverseInnerIterator { typedef typename SparseVector::ReverseInnerIterator Base; public: ReverseInnerIterator(const DynamicSparseMatrix& mat, Index outer) : Base(mat.m_data[outer]), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : Base::index(); } inline Index col() const { return IsRowMajor ? Base::index() : m_outer; } inline Index outer() const { return m_outer; } protected: const Index m_outer; }; namespace internal { template struct evaluator > : evaluator_base > { typedef _Scalar Scalar; typedef DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType; typedef typename SparseMatrixType::InnerIterator InnerIterator; typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator; enum { CoeffReadCost = NumTraits<_Scalar>::ReadCost, Flags = SparseMatrixType::Flags }; evaluator() : m_matrix(0) {} evaluator(const SparseMatrixType &mat) : m_matrix(&mat) {} operator SparseMatrixType&() { return m_matrix->const_cast_derived(); } operator const SparseMatrixType&() const { return *m_matrix; } Scalar coeff(Index row, Index col) const { return m_matrix->coeff(row,col); } Index nonZerosEstimate() const { return m_matrix->nonZeros(); } const SparseMatrixType *m_matrix; }; } } // end namespace Eigen #endif // EIGEN_DYNAMIC_SPARSEMATRIX_H RcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/MarketIO.h0000644000176200001440000001661313100733213025021 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // Copyright (C) 2012 Desire NUENTSA WAKAM // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
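/* A brief sketch of the assembly pattern documented above for DynamicSparseMatrix
   (the class is deprecated in favour of an uncompressed SparseMatrix); coeffRef()
   performs the sorted insertion described in the class documentation, and the
   result can then be copied into a plain SparseMatrix:
   \code
   #include <unsupported/Eigen/SparseExtra>

   Eigen::DynamicSparseMatrix<double> dyn(100, 100);
   dyn.reserve(300);
   for (int k = 0; k < 100; ++k)
     dyn.coeffRef(k, (37 * k) % 100) += 1.0;   // random read/write access
   Eigen::SparseMatrix<double> spmat(dyn);     // convert to the standard format
   \endcode
*/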
#ifndef EIGEN_SPARSE_MARKET_IO_H #define EIGEN_SPARSE_MARKET_IO_H #include namespace Eigen { namespace internal { template inline bool GetMarketLine (std::stringstream& line, Index& M, Index& N, Index& i, Index& j, Scalar& value) { line >> i >> j >> value; i--; j--; if(i>=0 && j>=0 && i inline bool GetMarketLine (std::stringstream& line, Index& M, Index& N, Index& i, Index& j, std::complex& value) { Scalar valR, valI; line >> i >> j >> valR >> valI; i--; j--; if(i>=0 && j>=0 && i(valR, valI); return true; } else return false; } template inline void GetVectorElt (const std::string& line, RealScalar& val) { std::istringstream newline(line); newline >> val; } template inline void GetVectorElt (const std::string& line, std::complex& val) { RealScalar valR, valI; std::istringstream newline(line); newline >> valR >> valI; val = std::complex(valR, valI); } template inline void putMarketHeader(std::string& header,int sym) { header= "%%MatrixMarket matrix coordinate "; if(internal::is_same >::value || internal::is_same >::value) { header += " complex"; if(sym == Symmetric) header += " symmetric"; else if (sym == SelfAdjoint) header += " Hermitian"; else header += " general"; } else { header += " real"; if(sym == Symmetric) header += " symmetric"; else header += " general"; } } template inline void PutMatrixElt(Scalar value, int row, int col, std::ofstream& out) { out << row << " "<< col << " " << value << "\n"; } template inline void PutMatrixElt(std::complex value, int row, int col, std::ofstream& out) { out << row << " " << col << " " << value.real() << " " << value.imag() << "\n"; } template inline void putVectorElt(Scalar value, std::ofstream& out) { out << value << "\n"; } template inline void putVectorElt(std::complex value, std::ofstream& out) { out << value.real << " " << value.imag()<< "\n"; } } // end namepsace internal inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscomplex, bool& isvector) { sym = 0; isvector = false; std::ifstream in(filename.c_str(),std::ios::in); if(!in) return false; std::string line; // The matrix header is always the first line in the file std::getline(in, line); eigen_assert(in.good()); std::stringstream fmtline(line); std::string substr[5]; fmtline>> substr[0] >> substr[1] >> substr[2] >> substr[3] >> substr[4]; if(substr[2].compare("array") == 0) isvector = true; if(substr[3].compare("complex") == 0) iscomplex = true; if(substr[4].compare("symmetric") == 0) sym = Symmetric; else if (substr[4].compare("Hermitian") == 0) sym = SelfAdjoint; return true; } template bool loadMarket(SparseMatrixType& mat, const std::string& filename) { typedef typename SparseMatrixType::Scalar Scalar; typedef typename SparseMatrixType::Index Index; std::ifstream input(filename.c_str(),std::ios::in); if(!input) return false; const int maxBuffersize = 2048; char buffer[maxBuffersize]; bool readsizes = false; typedef Triplet T; std::vector elements; Index M(-1), N(-1), NNZ(-1); Index count = 0; while(input.getline(buffer, maxBuffersize)) { // skip comments //NOTE An appropriate test should be done on the header to get the symmetry if(buffer[0]=='%') continue; std::stringstream line(buffer); if(!readsizes) { line >> M >> N >> NNZ; if(M > 0 && N > 0 && NNZ > 0) { readsizes = true; //std::cout << "sizes: " << M << "," << N << "," << NNZ << "\n"; mat.resize(M,N); mat.reserve(NNZ); } } else { Index i(-1), j(-1); Scalar value; if( internal::GetMarketLine(line, M, N, i, j, value) ) { ++ count; elements.push_back(T(i,j,value)); } else std::cerr << "Invalid read: " << 
i << "," << j << "\n"; } } mat.setFromTriplets(elements.begin(), elements.end()); if(count!=NNZ) std::cerr << count << "!=" << NNZ << "\n"; input.close(); return true; } template bool loadMarketVector(VectorType& vec, const std::string& filename) { typedef typename VectorType::Scalar Scalar; std::ifstream in(filename.c_str(), std::ios::in); if(!in) return false; std::string line; int n(0), col(0); do { // Skip comments std::getline(in, line); eigen_assert(in.good()); } while (line[0] == '%'); std::istringstream newline(line); newline >> n >> col; eigen_assert(n>0 && col>0); vec.resize(n); int i = 0; Scalar value; while ( std::getline(in, line) && (i < n) ){ internal::GetVectorElt(line, value); vec(i++) = value; } in.close(); if (i!=n){ std::cerr<< "Unable to read all elements from file " << filename << "\n"; return false; } return true; } template bool saveMarket(const SparseMatrixType& mat, const std::string& filename, int sym = 0) { typedef typename SparseMatrixType::Scalar Scalar; std::ofstream out(filename.c_str(),std::ios::out); if(!out) return false; out.flags(std::ios_base::scientific); out.precision(64); std::string header; internal::putMarketHeader(header, sym); out << header << std::endl; out << mat.rows() << " " << mat.cols() << " " << mat.nonZeros() << "\n"; int count = 0; for(int j=0; j bool saveMarketVector (const VectorType& vec, const std::string& filename) { typedef typename VectorType::Scalar Scalar; std::ofstream out(filename.c_str(),std::ios::out); if(!out) return false; out.flags(std::ios_base::scientific); out.precision(64); if(internal::is_same >::value || internal::is_same >::value) out << "%%MatrixMarket matrix array complex general\n"; else out << "%%MatrixMarket matrix array real general\n"; out << vec.size() << " "<< 1 << "\n"; for (int i=0; i < vec.size(); i++){ internal::putVectorElt(vec(i), out); } out.close(); return true; } } // end namespace Eigen #endif // EIGEN_SPARSE_MARKET_IO_H RcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h0000644000176200001440000001677113115262204027500 0ustar liggesusers // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Desire NUENTSA WAKAM // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BROWSE_MATRICES_H #define EIGEN_BROWSE_MATRICES_H namespace Eigen { enum { SPD = 0x100, NonSymmetric = 0x0 }; /** * @brief Iterator to browse matrices from a specified folder * * This is used to load all the matrices from a folder. * The matrices should be in Matrix Market format * It is assumed that the matrices are named as matname.mtx * and matname_SPD.mtx if the matrix is Symmetric and positive definite (or Hermitian) * The right hand side vectors are loaded as well, if they exist. * They should be named as matname_b.mtx. * Note that the right hand side for a SPD matrix is named as matname_SPD_b.mtx * * Sometimes a reference solution is available. 
In this case, it should be named as matname_x.mtx * * Sample code * \code * * \endcode * * \tparam Scalar The scalar type */ template class MatrixMarketIterator { typedef typename NumTraits::Real RealScalar; public: typedef Matrix VectorType; typedef SparseMatrix MatrixType; public: MatrixMarketIterator(const std::string &folder) : m_sym(0), m_isvalid(false), m_matIsLoaded(false), m_hasRhs(false), m_hasrefX(false), m_folder(folder) { m_folder_id = opendir(folder.c_str()); if(m_folder_id) Getnextvalidmatrix(); } ~MatrixMarketIterator() { if (m_folder_id) closedir(m_folder_id); } inline MatrixMarketIterator& operator++() { m_matIsLoaded = false; m_hasrefX = false; m_hasRhs = false; Getnextvalidmatrix(); return *this; } inline operator bool() const { return m_isvalid;} /** Return the sparse matrix corresponding to the current file */ inline MatrixType& matrix() { // Read the matrix if (m_matIsLoaded) return m_mat; std::string matrix_file = m_folder + "/" + m_matname + ".mtx"; if ( !loadMarket(m_mat, matrix_file)) { std::cerr << "Warning loadMarket failed when loading \"" << matrix_file << "\"" << std::endl; m_matIsLoaded = false; return m_mat; } m_matIsLoaded = true; if (m_sym != NonSymmetric) { // Check whether we need to restore a full matrix: RealScalar diag_norm = m_mat.diagonal().norm(); RealScalar lower_norm = m_mat.template triangularView().norm(); RealScalar upper_norm = m_mat.template triangularView().norm(); if(lower_norm>diag_norm && upper_norm==diag_norm) { // only the lower part is stored MatrixType tmp(m_mat); m_mat = tmp.template selfadjointView(); } else if(upper_norm>diag_norm && lower_norm==diag_norm) { // only the upper part is stored MatrixType tmp(m_mat); m_mat = tmp.template selfadjointView(); } } return m_mat; } /** Return the right hand side corresponding to the current matrix. * If the rhs file is not provided, a random rhs is generated */ inline VectorType& rhs() { // Get the right hand side if (m_hasRhs) return m_rhs; std::string rhs_file; rhs_file = m_folder + "/" + m_matname + "_b.mtx"; // The pattern is matname_b.mtx m_hasRhs = Fileexists(rhs_file); if (m_hasRhs) { m_rhs.resize(m_mat.cols()); m_hasRhs = loadMarketVector(m_rhs, rhs_file); } if (!m_hasRhs) { // Generate a random right hand side if (!m_matIsLoaded) this->matrix(); m_refX.resize(m_mat.cols()); m_refX.setRandom(); m_rhs = m_mat * m_refX; m_hasrefX = true; m_hasRhs = true; } return m_rhs; } /** Return a reference solution * If it is not provided and if the right hand side is not available * then refX is randomly generated such that A*refX = b * where A and b are the matrix and the rhs. 
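 *
 * A short usage sketch combining the Matrix Market helpers defined above with this
 * iterator (the folder path and file name are illustrative only):
 * \code
 * // write a small matrix with saveMarket(), then browse a folder of .mtx files
 * Eigen::SparseMatrix<double> S(10, 10);
 * S.insert(0, 0) = 1.0;
 * S.insert(3, 7) = 2.5;
 * Eigen::saveMarket(S, "/path/to/matrices/S.mtx");
 *
 * typedef Eigen::MatrixMarketIterator<double> MMIter;
 * for (MMIter it("/path/to/matrices"); it; ++it)
 * {
 *   MMIter::MatrixType& M = it.matrix();   // loaded on demand via loadMarket()
 *   MMIter::VectorType  b = it.rhs();      // generated randomly if no _b.mtx file exists
 *   std::cout << it.matname() << ": " << M.rows() << "x" << M.cols() << "\n";
 * }
 * \endcode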
* Note that when a rhs is provided, refX is not available */ inline VectorType& refX() { // Check if a reference solution is provided if (m_hasrefX) return m_refX; std::string lhs_file; lhs_file = m_folder + "/" + m_matname + "_x.mtx"; m_hasrefX = Fileexists(lhs_file); if (m_hasrefX) { m_refX.resize(m_mat.cols()); m_hasrefX = loadMarketVector(m_refX, lhs_file); } else m_refX.resize(0); return m_refX; } inline std::string& matname() { return m_matname; } inline int sym() { return m_sym; } bool hasRhs() {return m_hasRhs; } bool hasrefX() {return m_hasrefX; } bool isFolderValid() { return bool(m_folder_id); } protected: inline bool Fileexists(std::string file) { std::ifstream file_id(file.c_str()); if (!file_id.good() ) { return false; } else { file_id.close(); return true; } } void Getnextvalidmatrix( ) { m_isvalid = false; // Here, we return with the next valid matrix in the folder while ( (m_curs_id = readdir(m_folder_id)) != NULL) { m_isvalid = false; std::string curfile; curfile = m_folder + "/" + m_curs_id->d_name; // Discard if it is a folder #if !(defined(__sun) || defined(_AIX) || defined(__hpux) || defined(__sgi) || defined(__HAIKU__)) if (m_curs_id->d_type == DT_DIR) continue; //FIXME This may not be available on non BSD systems #endif // struct stat st_buf; // stat (curfile.c_str(), &st_buf); // if (S_ISDIR(st_buf.st_mode)) continue; // Determine from the header if it is a matrix or a right hand side bool isvector,iscomplex=false; if(!getMarketHeader(curfile,m_sym,iscomplex,isvector)) continue; if(isvector) continue; if (!iscomplex) { if(internal::is_same >::value || internal::is_same >::value) continue; } if (iscomplex) { if(internal::is_same::value || internal::is_same::value) continue; } // Get the matrix name std::string filename = m_curs_id->d_name; m_matname = filename.substr(0, filename.length()-4); // Find if the matrix is SPD size_t found = m_matname.find("SPD"); if( (found!=std::string::npos) && (m_sym != NonSymmetric) ) m_sym = SPD; m_isvalid = true; break; } } int m_sym; // Symmetry of the matrix MatrixType m_mat; // Current matrix VectorType m_rhs; // Current vector VectorType m_refX; // The reference solution, if exists std::string m_matname; // Matrix Name bool m_isvalid; bool m_matIsLoaded; // Determine if the matrix has already been loaded from the file bool m_hasRhs; // The right hand side exists bool m_hasrefX; // A reference solution is provided std::string m_folder; DIR * m_folder_id; struct dirent *m_curs_id; }; } // end namespace Eigen #endif RcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h0000644000176200001440000001024412253717461030546 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H #define EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H namespace Eigen { #if 0 // NOTE Have to be reimplemented as a specialization of BlockImpl< DynamicSparseMatrix<_Scalar, _Options, _Index>, ... 
> // See SparseBlock.h for an example /*************************************************************************** * specialisation for DynamicSparseMatrix ***************************************************************************/ template class SparseInnerVectorSet, Size> : public SparseMatrixBase, Size> > { typedef DynamicSparseMatrix<_Scalar, _Options, _Index> MatrixType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet) class InnerIterator: public MatrixType::InnerIterator { public: inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer) : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) { eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); } inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) { eigen_assert(Size!=Dynamic); eigen_assert( (outer>=0) && (outer inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) { if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit)) { // need to transpose => perform a block evaluation followed by a big swap DynamicSparseMatrix aux(other); *this = aux.markAsRValue(); } else { // evaluate/copy vector per vector for (Index j=0; j aux(other.innerVector(j)); m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data()); } } return *this; } inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other) { return operator=(other); } Index nonZeros() const { Index count = 0; for (Index j=0; j0); return m_matrix.data()[m_outerStart].vale(m_matrix.data()[m_outerStart].size()-1); } // template // inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) // { // return *this; // } EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } protected: const typename MatrixType::Nested m_matrix; Index m_outerStart; const internal::variable_if_dynamic m_outerSize; }; #endif } // end namespace Eigen #endif // EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H RcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h0000644000176200001440000011657313100733213026751 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2013 Desire Nuentsa // Copyright (C) 2013 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEBLOCKMATRIX_H #define EIGEN_SPARSEBLOCKMATRIX_H namespace Eigen { /** \ingroup SparseCore_Module * * \class BlockSparseMatrix * * \brief A versatile sparse matrix representation where each element is a block * * This class provides routines to manipulate block sparse matrices stored in a * BSR-like representation. There are two main types : * * 1. All blocks have the same number of rows and columns, called block size * in the following. 
In this case, if this block size is known at compile time, * it can be given as a template parameter like * \code * BlockSparseMatrix bmat(b_rows, b_cols); * \endcode * Here, bmat is a b_rows x b_cols block sparse matrix * where each coefficient is a 3x3 dense matrix. * If the block size is fixed but will be given at runtime, * \code * BlockSparseMatrix bmat(b_rows, b_cols); * bmat.setBlockSize(block_size); * \endcode * * 2. The second case is for variable-block sparse matrices. * Here each block has its own dimensions. The only restriction is that all the blocks * in a row (resp. a column) should have the same number of rows (resp. of columns). * It is thus required in this case to describe the layout of the matrix by calling * setBlockLayout(rowBlocks, colBlocks). * * In any of the previous case, the matrix can be filled by calling setFromTriplets(). * A regular sparse matrix can be converted to a block sparse matrix and vice versa. * It is obviously required to describe the block layout beforehand by calling either * setBlockSize() for fixed-size blocks or setBlockLayout for variable-size blocks. * * \tparam _Scalar The Scalar type * \tparam _BlockAtCompileTime The block layout option. It takes the following values * Dynamic : block size known at runtime * a numeric number : fixed-size block known at compile time */ template class BlockSparseMatrix; template class BlockSparseMatrixView; namespace internal { template struct traits > { typedef _Scalar Scalar; typedef _Index Index; typedef Sparse StorageKind; // FIXME Where is it used ?? typedef MatrixXpr XprKind; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = Dynamic, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic, BlockSize = _BlockAtCompileTime, Flags = _Options | NestByRefBit | LvalueBit, CoeffReadCost = NumTraits::ReadCost, SupportedAccessPatterns = InnerRandomAccessPattern }; }; template struct traits > { typedef Ref > Scalar; typedef Ref > RealScalar; }; // Function object to sort a triplet list template struct TripletComp { typedef typename Iterator::value_type Triplet; bool operator()(const Triplet& a, const Triplet& b) { if(IsColMajor) return ((a.col() == b.col() && a.row() < b.row()) || (a.col() < b.col())); else return ((a.row() == b.row() && a.col() < b.col()) || (a.row() < b.row())); } }; } // end namespace internal /* Proxy to view the block sparse matrix as a regular sparse matrix */ template class BlockSparseMatrixView : public SparseMatrixBase { public: typedef Ref Scalar; typedef Ref RealScalar; typedef typename BlockSparseMatrixT::Index Index; typedef BlockSparseMatrixT Nested; enum { Flags = BlockSparseMatrixT::Options, Options = BlockSparseMatrixT::Options, RowsAtCompileTime = BlockSparseMatrixT::RowsAtCompileTime, ColsAtCompileTime = BlockSparseMatrixT::ColsAtCompileTime, MaxColsAtCompileTime = BlockSparseMatrixT::MaxColsAtCompileTime, MaxRowsAtCompileTime = BlockSparseMatrixT::MaxRowsAtCompileTime }; public: BlockSparseMatrixView(const BlockSparseMatrixT& spblockmat) : m_spblockmat(spblockmat) {} Index outerSize() const { return (Flags&RowMajorBit) == 1 ? 
this->rows() : this->cols(); } Index cols() const { return m_spblockmat.blockCols(); } Index rows() const { return m_spblockmat.blockRows(); } Scalar coeff(Index row, Index col) { return m_spblockmat.coeff(row, col); } Scalar coeffRef(Index row, Index col) { return m_spblockmat.coeffRef(row, col); } // Wrapper to iterate over all blocks class InnerIterator : public BlockSparseMatrixT::BlockInnerIterator { public: InnerIterator(const BlockSparseMatrixView& mat, Index outer) : BlockSparseMatrixT::BlockInnerIterator(mat.m_spblockmat, outer) {} }; protected: const BlockSparseMatrixT& m_spblockmat; }; // Proxy to view a regular vector as a block vector template class BlockVectorView { public: enum { BlockSize = BlockSparseMatrixT::BlockSize, ColsAtCompileTime = VectorType::ColsAtCompileTime, RowsAtCompileTime = VectorType::RowsAtCompileTime, Flags = VectorType::Flags }; typedef Ref >Scalar; typedef typename BlockSparseMatrixT::Index Index; public: BlockVectorView(const BlockSparseMatrixT& spblockmat, const VectorType& vec) : m_spblockmat(spblockmat),m_vec(vec) { } inline Index cols() const { return m_vec.cols(); } inline Index size() const { return m_spblockmat.blockRows(); } inline Scalar coeff(Index bi) const { Index startRow = m_spblockmat.blockRowsIndex(bi); Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow; return m_vec.middleRows(startRow, rowSize); } inline Scalar coeff(Index bi, Index j) const { Index startRow = m_spblockmat.blockRowsIndex(bi); Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow; return m_vec.block(startRow, j, rowSize, 1); } protected: const BlockSparseMatrixT& m_spblockmat; const VectorType& m_vec; }; template class BlockVectorReturn; // Proxy to view a regular vector as a block vector template class BlockVectorReturn { public: enum { ColsAtCompileTime = VectorType::ColsAtCompileTime, RowsAtCompileTime = VectorType::RowsAtCompileTime, Flags = VectorType::Flags }; typedef Ref > Scalar; typedef typename BlockSparseMatrixT::Index Index; public: BlockVectorReturn(const BlockSparseMatrixT& spblockmat, VectorType& vec) : m_spblockmat(spblockmat),m_vec(vec) { } inline Index size() const { return m_spblockmat.blockRows(); } inline Scalar coeffRef(Index bi) { Index startRow = m_spblockmat.blockRowsIndex(bi); Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow; return m_vec.middleRows(startRow, rowSize); } inline Scalar coeffRef(Index bi, Index j) { Index startRow = m_spblockmat.blockRowsIndex(bi); Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow; return m_vec.block(startRow, j, rowSize, 1); } protected: const BlockSparseMatrixT& m_spblockmat; VectorType& m_vec; }; // Block version of the sparse dense product template class BlockSparseTimeDenseProduct; namespace internal { template struct traits > { typedef Dense StorageKind; typedef MatrixXpr XprKind; typedef typename BlockSparseMatrixT::Scalar Scalar; typedef typename BlockSparseMatrixT::Index Index; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = Dynamic, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic, Flags = 0, CoeffReadCost = internal::traits::CoeffReadCost }; }; } // end namespace internal template class BlockSparseTimeDenseProduct : public ProductBase, Lhs, Rhs> { public: EIGEN_PRODUCT_PUBLIC_INTERFACE(BlockSparseTimeDenseProduct) BlockSparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} template void scaleAndAddTo(Dest& dest, const typename Rhs::Scalar& alpha) const { BlockVectorReturn tmpDest(m_lhs, dest); 
internal::sparse_time_dense_product( BlockSparseMatrixView(m_lhs), BlockVectorView(m_lhs, m_rhs), tmpDest, alpha); } private: BlockSparseTimeDenseProduct& operator=(const BlockSparseTimeDenseProduct&); }; template class BlockSparseMatrix : public SparseMatrixBase > { public: typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef _StorageIndex StorageIndex; typedef typename internal::ref_selector >::type Nested; enum { Options = _Options, Flags = Options, BlockSize=_BlockAtCompileTime, RowsAtCompileTime = Dynamic, ColsAtCompileTime = Dynamic, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic, IsVectorAtCompileTime = 0, IsColMajor = Flags&RowMajorBit ? 0 : 1 }; typedef Matrix BlockScalar; typedef Matrix BlockRealScalar; typedef typename internal::conditional<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar>::type BlockScalarReturnType; typedef BlockSparseMatrix PlainObject; public: // Default constructor BlockSparseMatrix() : m_innerBSize(0),m_outerBSize(0),m_innerOffset(0),m_outerOffset(0), m_nonzerosblocks(0),m_values(0),m_blockPtr(0),m_indices(0), m_outerIndex(0),m_blockSize(BlockSize) { } /** * \brief Construct and resize * */ BlockSparseMatrix(Index brow, Index bcol) : m_innerBSize(IsColMajor ? brow : bcol), m_outerBSize(IsColMajor ? bcol : brow), m_innerOffset(0),m_outerOffset(0),m_nonzerosblocks(0), m_values(0),m_blockPtr(0),m_indices(0), m_outerIndex(0),m_blockSize(BlockSize) { } /** * \brief Copy-constructor */ BlockSparseMatrix(const BlockSparseMatrix& other) : m_innerBSize(other.m_innerBSize),m_outerBSize(other.m_outerBSize), m_nonzerosblocks(other.m_nonzerosblocks),m_nonzeros(other.m_nonzeros), m_blockPtr(0),m_blockSize(other.m_blockSize) { // should we allow copying between variable-size blocks and fixed-size blocks ?? eigen_assert(m_blockSize == BlockSize && " CAN NOT COPY BETWEEN FIXED-SIZE AND VARIABLE-SIZE BLOCKS"); std::copy(other.m_innerOffset, other.m_innerOffset+m_innerBSize+1, m_innerOffset); std::copy(other.m_outerOffset, other.m_outerOffset+m_outerBSize+1, m_outerOffset); std::copy(other.m_values, other.m_values+m_nonzeros, m_values); if(m_blockSize != Dynamic) std::copy(other.m_blockPtr, other.m_blockPtr+m_nonzerosblocks, m_blockPtr); std::copy(other.m_indices, other.m_indices+m_nonzerosblocks, m_indices); std::copy(other.m_outerIndex, other.m_outerIndex+m_outerBSize, m_outerIndex); } friend void swap(BlockSparseMatrix& first, BlockSparseMatrix& second) { std::swap(first.m_innerBSize, second.m_innerBSize); std::swap(first.m_outerBSize, second.m_outerBSize); std::swap(first.m_innerOffset, second.m_innerOffset); std::swap(first.m_outerOffset, second.m_outerOffset); std::swap(first.m_nonzerosblocks, second.m_nonzerosblocks); std::swap(first.m_nonzeros, second.m_nonzeros); std::swap(first.m_values, second.m_values); std::swap(first.m_blockPtr, second.m_blockPtr); std::swap(first.m_indices, second.m_indices); std::swap(first.m_outerIndex, second.m_outerIndex); std::swap(first.m_BlockSize, second.m_blockSize); } BlockSparseMatrix& operator=(BlockSparseMatrix other) { //Copy-and-swap paradigm ... 
avoid leaked data if thrown swap(*this, other); return *this; } // Destructor ~BlockSparseMatrix() { delete[] m_outerIndex; delete[] m_innerOffset; delete[] m_outerOffset; delete[] m_indices; delete[] m_blockPtr; delete[] m_values; } /** * \brief Constructor from a sparse matrix * */ template inline BlockSparseMatrix(const MatrixType& spmat) : m_blockSize(BlockSize) { EIGEN_STATIC_ASSERT((m_blockSize != Dynamic), THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE); *this = spmat; } /** * \brief Assignment from a sparse matrix with the same storage order * * Convert from a sparse matrix to block sparse matrix. * \warning Before calling this function, tt is necessary to call * either setBlockLayout() (matrices with variable-size blocks) * or setBlockSize() (for fixed-size blocks). */ template inline BlockSparseMatrix& operator=(const MatrixType& spmat) { eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) && "Trying to assign to a zero-size matrix, call resize() first"); eigen_assert(((MatrixType::Options&RowMajorBit) != IsColMajor) && "Wrong storage order"); typedef SparseMatrix MatrixPatternType; MatrixPatternType blockPattern(blockRows(), blockCols()); m_nonzeros = 0; // First, compute the number of nonzero blocks and their locations for(StorageIndex bj = 0; bj < m_outerBSize; ++bj) { // Browse each outer block and compute the structure std::vector nzblocksFlag(m_innerBSize,false); // Record the existing blocks blockPattern.startVec(bj); for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j) { typename MatrixType::InnerIterator it_spmat(spmat, j); for(; it_spmat; ++it_spmat) { StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block if(!nzblocksFlag[bi]) { // Save the index of this nonzero block nzblocksFlag[bi] = true; blockPattern.insertBackByOuterInnerUnordered(bj, bi) = true; // Compute the total number of nonzeros (including explicit zeros in blocks) m_nonzeros += blockOuterSize(bj) * blockInnerSize(bi); } } } // end current outer block } blockPattern.finalize(); // Allocate the internal arrays setBlockStructure(blockPattern); for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0); for(StorageIndex bj = 0; bj < m_outerBSize; ++bj) { // Now copy the values for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j) { // Browse the outer block column by column (for column-major matrices) typename MatrixType::InnerIterator it_spmat(spmat, j); for(; it_spmat; ++it_spmat) { StorageIndex idx = 0; // Position of this block in the column block StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block // Go to the inner block where this element belongs to while(bi > m_indices[m_outerIndex[bj]+idx]) ++idx; // Not expensive for ordered blocks StorageIndex idxVal;// Get the right position in the array of values for this element if(m_blockSize == Dynamic) { // Offset from all blocks before ... idxVal = m_blockPtr[m_outerIndex[bj]+idx]; // ... 
and offset inside the block idxVal += (j - blockOuterIndex(bj)) * blockOuterSize(bj) + it_spmat.index() - m_innerOffset[bi]; } else { // All blocks before idxVal = (m_outerIndex[bj] + idx) * m_blockSize * m_blockSize; // inside the block idxVal += (j - blockOuterIndex(bj)) * m_blockSize + (it_spmat.index()%m_blockSize); } // Insert the value m_values[idxVal] = it_spmat.value(); } // end of this column } // end of this block } // end of this outer block return *this; } /** * \brief Set the nonzero block pattern of the matrix * * Given a sparse matrix describing the nonzero block pattern, * this function prepares the internal pointers for values. * After calling this function, any *nonzero* block (bi, bj) can be set * with a simple call to coeffRef(bi,bj). * * * \warning Before calling this function, tt is necessary to call * either setBlockLayout() (matrices with variable-size blocks) * or setBlockSize() (for fixed-size blocks). * * \param blockPattern Sparse matrix of boolean elements describing the block structure * * \sa setBlockLayout() \sa setBlockSize() */ template void setBlockStructure(const MatrixType& blockPattern) { resize(blockPattern.rows(), blockPattern.cols()); reserve(blockPattern.nonZeros()); // Browse the block pattern and set up the various pointers m_outerIndex[0] = 0; if(m_blockSize == Dynamic) m_blockPtr[0] = 0; for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0); for(StorageIndex bj = 0; bj < m_outerBSize; ++bj) { //Browse each outer block //First, copy and save the indices of nonzero blocks //FIXME : find a way to avoid this ... std::vector nzBlockIdx; typename MatrixType::InnerIterator it(blockPattern, bj); for(; it; ++it) { nzBlockIdx.push_back(it.index()); } std::sort(nzBlockIdx.begin(), nzBlockIdx.end()); // Now, fill block indices and (eventually) pointers to blocks for(StorageIndex idx = 0; idx < nzBlockIdx.size(); ++idx) { StorageIndex offset = m_outerIndex[bj]+idx; // offset in m_indices m_indices[offset] = nzBlockIdx[idx]; if(m_blockSize == Dynamic) m_blockPtr[offset] = m_blockPtr[offset-1] + blockInnerSize(nzBlockIdx[idx]) * blockOuterSize(bj); // There is no blockPtr for fixed-size blocks... not needed !??? } // Save the pointer to the next outer block m_outerIndex[bj+1] = m_outerIndex[bj] + nzBlockIdx.size(); } } /** * \brief Set the number of rows and columns blocks */ inline void resize(Index brow, Index bcol) { m_innerBSize = IsColMajor ? brow : bcol; m_outerBSize = IsColMajor ? bcol : brow; } /** * \brief set the block size at runtime for fixed-size block layout * * Call this only for fixed-size blocks */ inline void setBlockSize(Index blockSize) { m_blockSize = blockSize; } /** * \brief Set the row and column block layouts, * * This function set the size of each row and column block. * So this function should be used only for blocks with variable size. * \param rowBlocks : Number of rows per row block * \param colBlocks : Number of columns per column block * \sa resize(), setBlockSize() */ inline void setBlockLayout(const VectorXi& rowBlocks, const VectorXi& colBlocks) { const VectorXi& innerBlocks = IsColMajor ? rowBlocks : colBlocks; const VectorXi& outerBlocks = IsColMajor ? colBlocks : rowBlocks; eigen_assert(m_innerBSize == innerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS"); eigen_assert(m_outerBSize == outerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS"); m_outerBSize = outerBlocks.size(); // starting index of blocks... 
cumulative sums m_innerOffset = new StorageIndex[m_innerBSize+1]; m_outerOffset = new StorageIndex[m_outerBSize+1]; m_innerOffset[0] = 0; m_outerOffset[0] = 0; std::partial_sum(&innerBlocks[0], &innerBlocks[m_innerBSize-1]+1, &m_innerOffset[1]); std::partial_sum(&outerBlocks[0], &outerBlocks[m_outerBSize-1]+1, &m_outerOffset[1]); // Compute the total number of nonzeros m_nonzeros = 0; for(StorageIndex bj = 0; bj < m_outerBSize; ++bj) for(StorageIndex bi = 0; bi < m_innerBSize; ++bi) m_nonzeros += outerBlocks[bj] * innerBlocks[bi]; } /** * \brief Allocate the internal array of pointers to blocks and their inner indices * * \note For fixed-size blocks, call setBlockSize() to set the block. * And For variable-size blocks, call setBlockLayout() before using this function * * \param nonzerosblocks Number of nonzero blocks. The total number of nonzeros is * is computed in setBlockLayout() for variable-size blocks * \sa setBlockSize() */ inline void reserve(const Index nonzerosblocks) { eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) && "TRYING TO RESERVE ZERO-SIZE MATRICES, CALL resize() first"); //FIXME Should free if already allocated m_outerIndex = new StorageIndex[m_outerBSize+1]; m_nonzerosblocks = nonzerosblocks; if(m_blockSize != Dynamic) { m_nonzeros = nonzerosblocks * (m_blockSize * m_blockSize); m_blockPtr = 0; } else { // m_nonzeros is already computed in setBlockLayout() m_blockPtr = new StorageIndex[m_nonzerosblocks+1]; } m_indices = new StorageIndex[m_nonzerosblocks+1]; m_values = new Scalar[m_nonzeros]; } /** * \brief Fill values in a matrix from a triplet list. * * Each triplet item has a block stored in an Eigen dense matrix. * The InputIterator class should provide the functions row(), col() and value() * * \note For fixed-size blocks, call setBlockSize() before this function. * * FIXME Do not accept duplicates */ template void setFromTriplets(const InputIterator& begin, const InputIterator& end) { eigen_assert((m_innerBSize!=0 && m_outerBSize !=0) && "ZERO BLOCKS, PLEASE CALL resize() before"); /* First, sort the triplet list * FIXME This can be unnecessarily expensive since only the inner indices have to be sorted * The best approach is like in SparseMatrix::setFromTriplets() */ internal::TripletComp tripletcomp; std::sort(begin, end, tripletcomp); /* Count the number of rows and column blocks, * and the number of nonzero blocks per outer dimension */ VectorXi rowBlocks(m_innerBSize); // Size of each block row VectorXi colBlocks(m_outerBSize); // Size of each block column rowBlocks.setZero(); colBlocks.setZero(); VectorXi nzblock_outer(m_outerBSize); // Number of nz blocks per outer vector VectorXi nz_outer(m_outerBSize); // Number of nz per outer vector...for variable-size blocks nzblock_outer.setZero(); nz_outer.setZero(); for(InputIterator it(begin); it !=end; ++it) { eigen_assert(it->row() >= 0 && it->row() < this->blockRows() && it->col() >= 0 && it->col() < this->blockCols()); eigen_assert((it->value().rows() == it->value().cols() && (it->value().rows() == m_blockSize)) || (m_blockSize == Dynamic)); if(m_blockSize == Dynamic) { eigen_assert((rowBlocks[it->row()] == 0 || rowBlocks[it->row()] == it->value().rows()) && "NON CORRESPONDING SIZES FOR ROW BLOCKS"); eigen_assert((colBlocks[it->col()] == 0 || colBlocks[it->col()] == it->value().cols()) && "NON CORRESPONDING SIZES FOR COLUMN BLOCKS"); rowBlocks[it->row()] =it->value().rows(); colBlocks[it->col()] = it->value().cols(); } nz_outer(IsColMajor ? 
it->col() : it->row()) += it->value().rows() * it->value().cols(); nzblock_outer(IsColMajor ? it->col() : it->row())++; } // Allocate member arrays if(m_blockSize == Dynamic) setBlockLayout(rowBlocks, colBlocks); StorageIndex nzblocks = nzblock_outer.sum(); reserve(nzblocks); // Temporary markers VectorXi block_id(m_outerBSize); // To be used as a block marker during insertion // Setup outer index pointers and markers m_outerIndex[0] = 0; if (m_blockSize == Dynamic) m_blockPtr[0] = 0; for(StorageIndex bj = 0; bj < m_outerBSize; ++bj) { m_outerIndex[bj+1] = m_outerIndex[bj] + nzblock_outer(bj); block_id(bj) = m_outerIndex[bj]; if(m_blockSize==Dynamic) { m_blockPtr[m_outerIndex[bj+1]] = m_blockPtr[m_outerIndex[bj]] + nz_outer(bj); } } // Fill the matrix for(InputIterator it(begin); it!=end; ++it) { StorageIndex outer = IsColMajor ? it->col() : it->row(); StorageIndex inner = IsColMajor ? it->row() : it->col(); m_indices[block_id(outer)] = inner; StorageIndex block_size = it->value().rows()*it->value().cols(); StorageIndex nz_marker = blockPtr(block_id[outer]); memcpy(&(m_values[nz_marker]), it->value().data(), block_size * sizeof(Scalar)); if(m_blockSize == Dynamic) { m_blockPtr[block_id(outer)+1] = m_blockPtr[block_id(outer)] + block_size; } block_id(outer)++; } // An alternative when the outer indices are sorted...no need to use an array of markers // for(Index bcol = 0; bcol < m_outerBSize; ++bcol) // { // Index id = 0, id_nz = 0, id_nzblock = 0; // for(InputIterator it(begin); it!=end; ++it) // { // while (idvalue().rows()*it->value().cols(); // m_blockPtr[id_nzblock+1] = m_blockPtr[id_nzblock] + block_size; // id_nzblock++; // memcpy(&(m_values[id_nz]),it->value().data(), block_size*sizeof(Scalar)); // id_nz += block_size; // } // while(id < m_outerBSize-1) // Empty columns at the end // { // id++; // m_outerIndex[id+1]=m_outerIndex[id]; // } // } } /** * \returns the number of rows */ inline Index rows() const { // return blockRows(); return (IsColMajor ? innerSize() : outerSize()); } /** * \returns the number of cols */ inline Index cols() const { // return blockCols(); return (IsColMajor ? outerSize() : innerSize()); } inline Index innerSize() const { if(m_blockSize == Dynamic) return m_innerOffset[m_innerBSize]; else return (m_innerBSize * m_blockSize) ; } inline Index outerSize() const { if(m_blockSize == Dynamic) return m_outerOffset[m_outerBSize]; else return (m_outerBSize * m_blockSize) ; } /** \returns the number of rows grouped by blocks */ inline Index blockRows() const { return (IsColMajor ? m_innerBSize : m_outerBSize); } /** \returns the number of columns grouped by blocks */ inline Index blockCols() const { return (IsColMajor ? 
m_outerBSize : m_innerBSize); } inline Index outerBlocks() const { return m_outerBSize; } inline Index innerBlocks() const { return m_innerBSize; } /** \returns the block index where outer belongs to */ inline Index outerToBlock(Index outer) const { eigen_assert(outer < outerSize() && "OUTER INDEX OUT OF BOUNDS"); if(m_blockSize != Dynamic) return (outer / m_blockSize); // Integer division StorageIndex b_outer = 0; while(m_outerOffset[b_outer] <= outer) ++b_outer; return b_outer - 1; } /** \returns the block index where inner belongs to */ inline Index innerToBlock(Index inner) const { eigen_assert(inner < innerSize() && "OUTER INDEX OUT OF BOUNDS"); if(m_blockSize != Dynamic) return (inner / m_blockSize); // Integer division StorageIndex b_inner = 0; while(m_innerOffset[b_inner] <= inner) ++b_inner; return b_inner - 1; } /** *\returns a reference to the (i,j) block as an Eigen Dense Matrix */ Ref coeffRef(Index brow, Index bcol) { eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS"); eigen_assert(bcol < blockCols() && "BLOCK nzblocksFlagCOLUMN OUT OF BOUNDS"); StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol); StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow); StorageIndex inner = IsColMajor ? brow : bcol; StorageIndex outer = IsColMajor ? bcol : brow; StorageIndex offset = m_outerIndex[outer]; while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++; if(m_indices[offset] == inner) { return Map(&(m_values[blockPtr(offset)]), rsize, csize); } else { //FIXME the block does not exist, Insert it !!!!!!!!! eigen_assert("DYNAMIC INSERTION IS NOT YET SUPPORTED"); } } /** * \returns the value of the (i,j) block as an Eigen Dense Matrix */ Map coeff(Index brow, Index bcol) const { eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS"); eigen_assert(bcol < blockCols() && "BLOCK COLUMN OUT OF BOUNDS"); StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol); StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow); StorageIndex inner = IsColMajor ? brow : bcol; StorageIndex outer = IsColMajor ? bcol : brow; StorageIndex offset = m_outerIndex[outer]; while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++; if(m_indices[offset] == inner) { return Map (&(m_values[blockPtr(offset)]), rsize, csize); } else // return BlockScalar::Zero(rsize, csize); eigen_assert("NOT YET SUPPORTED"); } // Block Matrix times vector product template BlockSparseTimeDenseProduct operator*(const VecType& lhs) const { return BlockSparseTimeDenseProduct(*this, lhs); } /** \returns the number of nonzero blocks */ inline Index nonZerosBlocks() const { return m_nonzerosblocks; } /** \returns the total number of nonzero elements, including eventual explicit zeros in blocks */ inline Index nonZeros() const { return m_nonzeros; } inline BlockScalarReturnType *valuePtr() {return static_cast(m_values);} // inline Scalar *valuePtr(){ return m_values; } inline StorageIndex *innerIndexPtr() {return m_indices; } inline const StorageIndex *innerIndexPtr() const {return m_indices; } inline StorageIndex *outerIndexPtr() {return m_outerIndex; } inline const StorageIndex* outerIndexPtr() const {return m_outerIndex; } /** \brief for compatibility purposes with the SparseMatrix class */ inline bool isCompressed() const {return true;} /** * \returns the starting index of the bi row block */ inline Index blockRowsIndex(Index bi) const { return IsColMajor ? 
blockInnerIndex(bi) : blockOuterIndex(bi); } /** * \returns the starting index of the bj col block */ inline Index blockColsIndex(Index bj) const { return IsColMajor ? blockOuterIndex(bj) : blockInnerIndex(bj); } inline Index blockOuterIndex(Index bj) const { return (m_blockSize == Dynamic) ? m_outerOffset[bj] : (bj * m_blockSize); } inline Index blockInnerIndex(Index bi) const { return (m_blockSize == Dynamic) ? m_innerOffset[bi] : (bi * m_blockSize); } // Not needed ??? inline Index blockInnerSize(Index bi) const { return (m_blockSize == Dynamic) ? (m_innerOffset[bi+1] - m_innerOffset[bi]) : m_blockSize; } inline Index blockOuterSize(Index bj) const { return (m_blockSize == Dynamic) ? (m_outerOffset[bj+1]- m_outerOffset[bj]) : m_blockSize; } /** * \brief Browse the matrix by outer index */ class InnerIterator; // Browse column by column /** * \brief Browse the matrix by block outer index */ class BlockInnerIterator; // Browse block by block friend std::ostream & operator << (std::ostream & s, const BlockSparseMatrix& m) { for (StorageIndex j = 0; j < m.outerBlocks(); ++j) { BlockInnerIterator itb(m, j); for(; itb; ++itb) { s << "("< in the array of values */ Index blockPtr(Index id) const { if(m_blockSize == Dynamic) return m_blockPtr[id]; else return id * m_blockSize * m_blockSize; //return blockDynIdx(id, typename internal::conditional<(BlockSize==Dynamic), internal::true_type, internal::false_type>::type()); } protected: // inline Index blockDynIdx(Index id, internal::true_type) const // { // return m_blockPtr[id]; // } // inline Index blockDynIdx(Index id, internal::false_type) const // { // return id * BlockSize * BlockSize; // } // To be implemented // Insert a block at a particular location... need to make a room for that Map insert(Index brow, Index bcol); Index m_innerBSize; // Number of block rows Index m_outerBSize; // Number of block columns StorageIndex *m_innerOffset; // Starting index of each inner block (size m_innerBSize+1) StorageIndex *m_outerOffset; // Starting index of each outer block (size m_outerBSize+1) Index m_nonzerosblocks; // Total nonzeros blocks (lower than m_innerBSize x m_outerBSize) Index m_nonzeros; // Total nonzeros elements Scalar *m_values; //Values stored block column after block column (size m_nonzeros) StorageIndex *m_blockPtr; // Pointer to the beginning of each block in m_values, size m_nonzeroblocks ... null for fixed-size blocks StorageIndex *m_indices; //Inner block indices, size m_nonzerosblocks ... OK StorageIndex *m_outerIndex; // Starting pointer of each block column in m_indices (size m_outerBSize)... 
OK Index m_blockSize; // Size of a block for fixed-size blocks, otherwise -1 }; template class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::BlockInnerIterator { public: enum{ Flags = _Options }; BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer) : m_mat(mat),m_outer(outer), m_id(mat.m_outerIndex[outer]), m_end(mat.m_outerIndex[outer+1]) { } inline BlockInnerIterator& operator++() {m_id++; return *this; } inline const Map value() const { return Map(&(m_mat.m_values[m_mat.blockPtr(m_id)]), rows(),cols()); } inline Map valueRef() { return Map(&(m_mat.m_values[m_mat.blockPtr(m_id)]), rows(),cols()); } // Block inner index inline Index index() const {return m_mat.m_indices[m_id]; } inline Index outer() const { return m_outer; } // block row index inline Index row() const {return index(); } // block column index inline Index col() const {return outer(); } // FIXME Number of rows in the current block inline Index rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; } // Number of columns in the current block ... inline Index cols() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;} inline operator bool() const { return (m_id < m_end); } protected: const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, StorageIndex>& m_mat; const Index m_outer; Index m_id; Index m_end; }; template class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::InnerIterator { public: InnerIterator(const BlockSparseMatrix& mat, Index outer) : m_mat(mat),m_outerB(mat.outerToBlock(outer)),m_outer(outer), itb(mat, mat.outerToBlock(outer)), m_offset(outer - mat.blockOuterIndex(m_outerB)) { if (itb) { m_id = m_mat.blockInnerIndex(itb.index()); m_start = m_id; m_end = m_mat.blockInnerIndex(itb.index()+1); } } inline InnerIterator& operator++() { m_id++; if (m_id >= m_end) { ++itb; if (itb) { m_id = m_mat.blockInnerIndex(itb.index()); m_start = m_id; m_end = m_mat.blockInnerIndex(itb.index()+1); } } return *this; } inline const Scalar& value() const { return itb.value().coeff(m_id - m_start, m_offset); } inline Scalar& valueRef() { return itb.valueRef().coeff(m_id - m_start, m_offset); } inline Index index() const { return m_id; } inline Index outer() const {return m_outer; } inline Index col() const {return outer(); } inline Index row() const { return index();} inline operator bool() const { return itb; } protected: const BlockSparseMatrix& m_mat; const Index m_outer; const Index m_outerB; BlockInnerIterator itb; // Iterator through the blocks const Index m_offset; // Position of this column in the block Index m_start; // starting inner index of this block Index m_id; // current inner index in the block Index m_end; // starting inner index of the next block }; } // end namespace Eigen #endif // EIGEN_SPARSEBLOCKMATRIX_H RcppEigen/inst/include/unsupported/Eigen/src/EulerAngles/0000755000176200001440000000000013100733213023133 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/EulerAngles/EulerAngles.h0000644000176200001440000004056313100733213025522 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Tal Hadad // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
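/* A brief assembly sketch for the BlockSparseMatrix class defined above, assuming
   its header has been included; it uses fixed-size 3x3 blocks and the
   setFromTriplets() interface described there (names and sizes are illustrative):
   \code
   typedef Eigen::BlockSparseMatrix<double, 3> BlockMat;    // 3x3 dense blocks
   BlockMat bmat(4, 4);                                     // 4x4 blocks, i.e. 12x12 scalars
   std::vector<Eigen::Triplet<Eigen::Matrix3d, int> > blocks;
   blocks.push_back(Eigen::Triplet<Eigen::Matrix3d, int>(0, 0, Eigen::Matrix3d::Identity()));
   blocks.push_back(Eigen::Triplet<Eigen::Matrix3d, int>(2, 1, Eigen::Matrix3d::Constant(0.5)));
   bmat.setFromTriplets(blocks.begin(), blocks.end());
   Eigen::Matrix3d b00 = bmat.coeff(0, 0);                  // read back one block
   \endcode
*/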
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EULERANGLESCLASS_H// TODO: Fix previous "EIGEN_EULERANGLES_H" definition? #define EIGEN_EULERANGLESCLASS_H namespace Eigen { /*template struct ei_eulerangles_assign_impl;*/ /** \class EulerAngles * * \ingroup EulerAngles_Module * * \brief Represents a rotation in a 3 dimensional space as three Euler angles. * * Euler rotation is a set of three rotation of three angles over three fixed axes, defined by the EulerSystem given as a template parameter. * * Here is how intrinsic Euler angles works: * - first, rotate the axes system over the alpha axis in angle alpha * - then, rotate the axes system over the beta axis(which was rotated in the first stage) in angle beta * - then, rotate the axes system over the gamma axis(which was rotated in the two stages above) in angle gamma * * \note This class support only intrinsic Euler angles for simplicity, * see EulerSystem how to easily overcome this for extrinsic systems. * * ### Rotation representation and conversions ### * * It has been proved(see Wikipedia link below) that every rotation can be represented * by Euler angles, but there is no singular representation (e.g. unlike rotation matrices). * Therefore, you can convert from Eigen rotation and to them * (including rotation matrices, which is not called "rotations" by Eigen design). * * Euler angles usually used for: * - convenient human representation of rotation, especially in interactive GUI. * - gimbal systems and robotics * - efficient encoding(i.e. 3 floats only) of rotation for network protocols. * * However, Euler angles are slow comparing to quaternion or matrices, * because their unnatural math definition, although it's simple for human. * To overcome this, this class provide easy movement from the math friendly representation * to the human friendly representation, and vise-versa. * * All the user need to do is a safe simple C++ type conversion, * and this class take care for the math. * Additionally, some axes related computation is done in compile time. * * #### Euler angles ranges in conversions #### * * When converting some rotation to Euler angles, there are some ways you can guarantee * the Euler angles ranges. * * #### implicit ranges #### * When using implicit ranges, all angles are guarantee to be in the range [-PI, +PI], * unless you convert from some other Euler angles. * In this case, the range is __undefined__ (might be even less than -PI or greater than +2*PI). * \sa EulerAngles(const MatrixBase&) * \sa EulerAngles(const RotationBase&) * * #### explicit ranges #### * When using explicit ranges, all angles are guarantee to be in the range you choose. * In the range Boolean parameter, you're been ask whether you prefer the positive range or not: * - _true_ - force the range between [0, +2*PI] * - _false_ - force the range between [-PI, +PI] * * ##### compile time ranges ##### * This is when you have compile time ranges and you prefer to * use template parameter. (e.g. for performance) * \sa FromRotation() * * ##### run-time time ranges ##### * Run-time ranges are also supported. * \sa EulerAngles(const MatrixBase&, bool, bool, bool) * \sa EulerAngles(const RotationBase&, bool, bool, bool) * * ### Convenient user typedefs ### * * Convenient typedefs for EulerAngles exist for float and double scalar, * in a form of EulerAngles{A}{B}{C}{scalar}, * e.g. \ref EulerAnglesXYZd, \ref EulerAnglesZYZf. 
* * Only for positive axes{+x,+y,+z} Euler systems are have convenient typedef. * If you need negative axes{-x,-y,-z}, it is recommended to create you own typedef with * a word that represent what you need. * * ### Example ### * * \include EulerAngles.cpp * Output: \verbinclude EulerAngles.out * * ### Additional reading ### * * If you're want to get more idea about how Euler system work in Eigen see EulerSystem. * * More information about Euler angles: https://en.wikipedia.org/wiki/Euler_angles * * \tparam _Scalar the scalar type, i.e., the type of the angles. * * \tparam _System the EulerSystem to use, which represents the axes of rotation. */ template class EulerAngles : public RotationBase, 3> { public: /** the scalar type of the angles */ typedef _Scalar Scalar; /** the EulerSystem to use, which represents the axes of rotation. */ typedef _System System; typedef Matrix Matrix3; /*!< the equivalent rotation matrix type */ typedef Matrix Vector3; /*!< the equivalent 3 dimension vector type */ typedef Quaternion QuaternionType; /*!< the equivalent quaternion type */ typedef AngleAxis AngleAxisType; /*!< the equivalent angle-axis type */ /** \returns the axis vector of the first (alpha) rotation */ static Vector3 AlphaAxisVector() { const Vector3& u = Vector3::Unit(System::AlphaAxisAbs - 1); return System::IsAlphaOpposite ? -u : u; } /** \returns the axis vector of the second (beta) rotation */ static Vector3 BetaAxisVector() { const Vector3& u = Vector3::Unit(System::BetaAxisAbs - 1); return System::IsBetaOpposite ? -u : u; } /** \returns the axis vector of the third (gamma) rotation */ static Vector3 GammaAxisVector() { const Vector3& u = Vector3::Unit(System::GammaAxisAbs - 1); return System::IsGammaOpposite ? -u : u; } private: Vector3 m_angles; public: /** Default constructor without initialization. */ EulerAngles() {} /** Constructs and initialize Euler angles(\p alpha, \p beta, \p gamma). */ EulerAngles(const Scalar& alpha, const Scalar& beta, const Scalar& gamma) : m_angles(alpha, beta, gamma) {} /** Constructs and initialize Euler angles from a 3x3 rotation matrix \p m. * * \note All angles will be in the range [-PI, PI]. */ template EulerAngles(const MatrixBase& m) { *this = m; } /** Constructs and initialize Euler angles from a 3x3 rotation matrix \p m, * with options to choose for each angle the requested range. * * If positive range is true, then the specified angle will be in the range [0, +2*PI]. * Otherwise, the specified angle will be in the range [-PI, +PI]. * * \param m The 3x3 rotation matrix to convert * \param positiveRangeAlpha If true, alpha will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. * \param positiveRangeBeta If true, beta will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. * \param positiveRangeGamma If true, gamma will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. */ template EulerAngles( const MatrixBase& m, bool positiveRangeAlpha, bool positiveRangeBeta, bool positiveRangeGamma) { System::CalcEulerAngles(*this, m, positiveRangeAlpha, positiveRangeBeta, positiveRangeGamma); } /** Constructs and initialize Euler angles from a rotation \p rot. * * \note All angles will be in the range [-PI, PI], unless \p rot is an EulerAngles. * If rot is an EulerAngles, expected EulerAngles range is __undefined__. * (Use other functions here for enforcing range if this effect is desired) */ template EulerAngles(const RotationBase& rot) { *this = rot; } /** Constructs and initialize Euler angles from a rotation \p rot, * with options to choose for each angle the requested range. 
* * If positive range is true, then the specified angle will be in the range [0, +2*PI]. * Otherwise, the specified angle will be in the range [-PI, +PI]. * * \param rot The 3x3 rotation matrix to convert * \param positiveRangeAlpha If true, alpha will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. * \param positiveRangeBeta If true, beta will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. * \param positiveRangeGamma If true, gamma will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. */ template EulerAngles( const RotationBase& rot, bool positiveRangeAlpha, bool positiveRangeBeta, bool positiveRangeGamma) { System::CalcEulerAngles(*this, rot.toRotationMatrix(), positiveRangeAlpha, positiveRangeBeta, positiveRangeGamma); } /** \returns The angle values stored in a vector (alpha, beta, gamma). */ const Vector3& angles() const { return m_angles; } /** \returns A read-write reference to the angle values stored in a vector (alpha, beta, gamma). */ Vector3& angles() { return m_angles; } /** \returns The value of the first angle. */ Scalar alpha() const { return m_angles[0]; } /** \returns A read-write reference to the angle of the first angle. */ Scalar& alpha() { return m_angles[0]; } /** \returns The value of the second angle. */ Scalar beta() const { return m_angles[1]; } /** \returns A read-write reference to the angle of the second angle. */ Scalar& beta() { return m_angles[1]; } /** \returns The value of the third angle. */ Scalar gamma() const { return m_angles[2]; } /** \returns A read-write reference to the angle of the third angle. */ Scalar& gamma() { return m_angles[2]; } /** \returns The Euler angles rotation inverse (which is as same as the negative), * (-alpha, -beta, -gamma). */ EulerAngles inverse() const { EulerAngles res; res.m_angles = -m_angles; return res; } /** \returns The Euler angles rotation negative (which is as same as the inverse), * (-alpha, -beta, -gamma). */ EulerAngles operator -() const { return inverse(); } /** Constructs and initialize Euler angles from a 3x3 rotation matrix \p m, * with options to choose for each angle the requested range (__only in compile time__). * * If positive range is true, then the specified angle will be in the range [0, +2*PI]. * Otherwise, the specified angle will be in the range [-PI, +PI]. * * \param m The 3x3 rotation matrix to convert * \tparam positiveRangeAlpha If true, alpha will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. * \tparam positiveRangeBeta If true, beta will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. * \tparam positiveRangeGamma If true, gamma will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. */ template< bool PositiveRangeAlpha, bool PositiveRangeBeta, bool PositiveRangeGamma, typename Derived> static EulerAngles FromRotation(const MatrixBase& m) { EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived, 3, 3) EulerAngles e; System::template CalcEulerAngles< PositiveRangeAlpha, PositiveRangeBeta, PositiveRangeGamma, _Scalar>(e, m); return e; } /** Constructs and initialize Euler angles from a rotation \p rot, * with options to choose for each angle the requested range (__only in compile time__). * * If positive range is true, then the specified angle will be in the range [0, +2*PI]. * Otherwise, the specified angle will be in the range [-PI, +PI]. * * \param rot The 3x3 rotation matrix to convert * \tparam positiveRangeAlpha If true, alpha will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. * \tparam positiveRangeBeta If true, beta will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. 
* \tparam positiveRangeGamma If true, gamma will be in [0, 2*PI]. Otherwise, in [-PI, +PI]. */ template< bool PositiveRangeAlpha, bool PositiveRangeBeta, bool PositiveRangeGamma, typename Derived> static EulerAngles FromRotation(const RotationBase& rot) { return FromRotation(rot.toRotationMatrix()); } /*EulerAngles& fromQuaternion(const QuaternionType& q) { // TODO: Implement it in a faster way for quaternions // According to http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToEuler/ // we can compute only the needed matrix cells and then convert to euler angles. (see ZYX example below) // Currently we compute all matrix cells from quaternion. // Special case only for ZYX //Scalar y2 = q.y() * q.y(); //m_angles[0] = std::atan2(2*(q.w()*q.z() + q.x()*q.y()), (1 - 2*(y2 + q.z()*q.z()))); //m_angles[1] = std::asin( 2*(q.w()*q.y() - q.z()*q.x())); //m_angles[2] = std::atan2(2*(q.w()*q.x() + q.y()*q.z()), (1 - 2*(q.x()*q.x() + y2))); }*/ /** Set \c *this from a rotation matrix(i.e. pure orthogonal matrix with determinant of +1). */ template EulerAngles& operator=(const MatrixBase& m) { EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived, 3, 3) System::CalcEulerAngles(*this, m); return *this; } // TODO: Assign and construct from another EulerAngles (with different system) /** Set \c *this from a rotation. */ template EulerAngles& operator=(const RotationBase& rot) { System::CalcEulerAngles(*this, rot.toRotationMatrix()); return *this; } // TODO: Support isApprox function /** \returns an equivalent 3x3 rotation matrix. */ Matrix3 toRotationMatrix() const { return static_cast(*this).toRotationMatrix(); } /** Convert the Euler angles to quaternion. */ operator QuaternionType() const { return AngleAxisType(alpha(), AlphaAxisVector()) * AngleAxisType(beta(), BetaAxisVector()) * AngleAxisType(gamma(), GammaAxisVector()); } friend std::ostream& operator<<(std::ostream& s, const EulerAngles& eulerAngles) { s << eulerAngles.angles().transpose(); return s; } }; #define EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(AXES, SCALAR_TYPE, SCALAR_POSTFIX) \ /** \ingroup EulerAngles_Module */ \ typedef EulerAngles EulerAngles##AXES##SCALAR_POSTFIX; #define EIGEN_EULER_ANGLES_TYPEDEFS(SCALAR_TYPE, SCALAR_POSTFIX) \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XYZ, SCALAR_TYPE, SCALAR_POSTFIX) \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XYX, SCALAR_TYPE, SCALAR_POSTFIX) \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XZY, SCALAR_TYPE, SCALAR_POSTFIX) \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XZX, SCALAR_TYPE, SCALAR_POSTFIX) \ \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YZX, SCALAR_TYPE, SCALAR_POSTFIX) \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YZY, SCALAR_TYPE, SCALAR_POSTFIX) \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YXZ, SCALAR_TYPE, SCALAR_POSTFIX) \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YXY, SCALAR_TYPE, SCALAR_POSTFIX) \ \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZXY, SCALAR_TYPE, SCALAR_POSTFIX) \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZXZ, SCALAR_TYPE, SCALAR_POSTFIX) \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZYX, SCALAR_TYPE, SCALAR_POSTFIX) \ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZYZ, SCALAR_TYPE, SCALAR_POSTFIX) EIGEN_EULER_ANGLES_TYPEDEFS(float, f) EIGEN_EULER_ANGLES_TYPEDEFS(double, d) namespace internal { template struct traits > { typedef _Scalar Scalar; }; } } #endif // EIGEN_EULERANGLESCLASS_H RcppEigen/inst/include/unsupported/Eigen/src/EulerAngles/EulerSystem.h0000644000176200001440000002631313100733213025572 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2015 Tal Hadad // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EULERSYSTEM_H #define EIGEN_EULERSYSTEM_H namespace Eigen { // Forward declerations template class EulerAngles; namespace internal { // TODO: Check if already exists on the rest API template 0)> struct Abs { enum { value = Num }; }; template struct Abs { enum { value = -Num }; }; template struct IsValidAxis { enum { value = Axis != 0 && Abs::value <= 3 }; }; } #define EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(COND,MSG) typedef char static_assertion_##MSG[(COND)?1:-1] /** \brief Representation of a fixed signed rotation axis for EulerSystem. * * \ingroup EulerAngles_Module * * Values here represent: * - The axis of the rotation: X, Y or Z. * - The sign (i.e. direction of the rotation along the axis): positive(+) or negative(-) * * Therefore, this could express all the axes {+X,+Y,+Z,-X,-Y,-Z} * * For positive axis, use +EULER_{axis}, and for negative axis use -EULER_{axis}. */ enum EulerAxis { EULER_X = 1, /*!< the X axis */ EULER_Y = 2, /*!< the Y axis */ EULER_Z = 3 /*!< the Z axis */ }; /** \class EulerSystem * * \ingroup EulerAngles_Module * * \brief Represents a fixed Euler rotation system. * * This meta-class goal is to represent the Euler system in compilation time, for EulerAngles. * * You can use this class to get two things: * - Build an Euler system, and then pass it as a template parameter to EulerAngles. * - Query some compile time data about an Euler system. (e.g. Whether it's tait bryan) * * Euler rotation is a set of three rotation on fixed axes. (see \ref EulerAngles) * This meta-class store constantly those signed axes. (see \ref EulerAxis) * * ### Types of Euler systems ### * * All and only valid 3 dimension Euler rotation over standard * signed axes{+X,+Y,+Z,-X,-Y,-Z} are supported: * - all axes X, Y, Z in each valid order (see below what order is valid) * - rotation over the axis is supported both over the positive and negative directions. * - both tait bryan and proper/classic Euler angles (i.e. the opposite). * * Since EulerSystem support both positive and negative directions, * you may call this rotation distinction in other names: * - _right handed_ or _left handed_ * - _counterclockwise_ or _clockwise_ * * Notice all axed combination are valid, and would trigger a static assertion. * Same unsigned axes can't be neighbors, e.g. {X,X,Y} is invalid. * This yield two and only two classes: * - _tait bryan_ - all unsigned axes are distinct, e.g. {X,Y,Z} * - _proper/classic Euler angles_ - The first and the third unsigned axes is equal, * and the second is different, e.g. {X,Y,X} * * ### Intrinsic vs extrinsic Euler systems ### * * Only intrinsic Euler systems are supported for simplicity. * If you want to use extrinsic Euler systems, * just use the equal intrinsic opposite order for axes and angles. * I.e axes (A,B,C) becomes (C,B,A), and angles (a,b,c) becomes (c,b,a). * * ### Convenient user typedefs ### * * Convenient typedefs for EulerSystem exist (only for positive axes Euler systems), * in a form of EulerSystem{A}{B}{C}, e.g. \ref EulerSystemXYZ. 
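 *
 * For instance (an illustrative sketch added here, not part of the original text;
 * the typedef names are hypothetical), a system that rotates about +Z, then -Y,
 * then +X, together with its matching angle type, could be declared as:
 * \code
 * typedef Eigen::EulerSystem<Eigen::EULER_Z, -Eigen::EULER_Y, Eigen::EULER_X> MyEulerSystem;
 * typedef Eigen::EulerAngles<double, MyEulerSystem> MyEulerAngles;
 * \endcode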
* * ### Additional reading ### * * More information about Euler angles: https://en.wikipedia.org/wiki/Euler_angles * * \tparam _AlphaAxis the first fixed EulerAxis * * \tparam _AlphaAxis the second fixed EulerAxis * * \tparam _AlphaAxis the third fixed EulerAxis */ template class EulerSystem { public: // It's defined this way and not as enum, because I think // that enum is not guerantee to support negative numbers /** The first rotation axis */ static const int AlphaAxis = _AlphaAxis; /** The second rotation axis */ static const int BetaAxis = _BetaAxis; /** The third rotation axis */ static const int GammaAxis = _GammaAxis; enum { AlphaAxisAbs = internal::Abs::value, /*!< the first rotation axis unsigned */ BetaAxisAbs = internal::Abs::value, /*!< the second rotation axis unsigned */ GammaAxisAbs = internal::Abs::value, /*!< the third rotation axis unsigned */ IsAlphaOpposite = (AlphaAxis < 0) ? 1 : 0, /*!< weather alpha axis is negative */ IsBetaOpposite = (BetaAxis < 0) ? 1 : 0, /*!< weather beta axis is negative */ IsGammaOpposite = (GammaAxis < 0) ? 1 : 0, /*!< weather gamma axis is negative */ IsOdd = ((AlphaAxisAbs)%3 == (BetaAxisAbs - 1)%3) ? 0 : 1, /*!< weather the Euler system is odd */ IsEven = IsOdd ? 0 : 1, /*!< weather the Euler system is even */ IsTaitBryan = ((unsigned)AlphaAxisAbs != (unsigned)GammaAxisAbs) ? 1 : 0 /*!< weather the Euler system is tait bryan */ }; private: EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(internal::IsValidAxis::value, ALPHA_AXIS_IS_INVALID); EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(internal::IsValidAxis::value, BETA_AXIS_IS_INVALID); EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(internal::IsValidAxis::value, GAMMA_AXIS_IS_INVALID); EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT((unsigned)AlphaAxisAbs != (unsigned)BetaAxisAbs, ALPHA_AXIS_CANT_BE_EQUAL_TO_BETA_AXIS); EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT((unsigned)BetaAxisAbs != (unsigned)GammaAxisAbs, BETA_AXIS_CANT_BE_EQUAL_TO_GAMMA_AXIS); enum { // I, J, K are the pivot indexes permutation for the rotation matrix, that match this Euler system. // They are used in this class converters. // They are always different from each other, and their possible values are: 0, 1, or 2. I = AlphaAxisAbs - 1, J = (AlphaAxisAbs - 1 + 1 + IsOdd)%3, K = (AlphaAxisAbs - 1 + 2 - IsOdd)%3 }; // TODO: Get @mat parameter in form that avoids double evaluation. 
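  // Worked example (an added clarification, not in the original source): for
  // EulerSystemXYZ we have AlphaAxisAbs == 1 and IsOdd == 0, so (I, J, K) == (0, 1, 2),
  // i.e. the identity permutation; for EulerSystemZYX we have AlphaAxisAbs == 3 and
  // IsOdd == 1, giving (I, J, K) == (2, 1, 0). The three indexes are always distinct
  // and select which rows and columns of the rotation matrix the converters below read.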
template static void CalcEulerAngles_imp(Matrix::Scalar, 3, 1>& res, const MatrixBase& mat, internal::true_type /*isTaitBryan*/) { using std::atan2; using std::sin; using std::cos; typedef typename Derived::Scalar Scalar; typedef Matrix Vector2; res[0] = atan2(mat(J,K), mat(K,K)); Scalar c2 = Vector2(mat(I,I), mat(I,J)).norm(); if((IsOdd && res[0]Scalar(0))) { if(res[0] > Scalar(0)) { res[0] -= Scalar(EIGEN_PI); } else { res[0] += Scalar(EIGEN_PI); } res[1] = atan2(-mat(I,K), -c2); } else res[1] = atan2(-mat(I,K), c2); Scalar s1 = sin(res[0]); Scalar c1 = cos(res[0]); res[2] = atan2(s1*mat(K,I)-c1*mat(J,I), c1*mat(J,J) - s1 * mat(K,J)); } template static void CalcEulerAngles_imp(Matrix::Scalar,3,1>& res, const MatrixBase& mat, internal::false_type /*isTaitBryan*/) { using std::atan2; using std::sin; using std::cos; typedef typename Derived::Scalar Scalar; typedef Matrix Vector2; res[0] = atan2(mat(J,I), mat(K,I)); if((IsOdd && res[0]Scalar(0))) { if(res[0] > Scalar(0)) { res[0] -= Scalar(EIGEN_PI); } else { res[0] += Scalar(EIGEN_PI); } Scalar s2 = Vector2(mat(J,I), mat(K,I)).norm(); res[1] = -atan2(s2, mat(I,I)); } else { Scalar s2 = Vector2(mat(J,I), mat(K,I)).norm(); res[1] = atan2(s2, mat(I,I)); } // With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles, // we can compute their respective rotation, and apply its inverse to M. Since the result must // be a rotation around x, we have: // // c2 s1.s2 c1.s2 1 0 0 // 0 c1 -s1 * M = 0 c3 s3 // -s2 s1.c2 c1.c2 0 -s3 c3 // // Thus: m11.c1 - m21.s1 = c3 & m12.c1 - m22.s1 = s3 Scalar s1 = sin(res[0]); Scalar c1 = cos(res[0]); res[2] = atan2(c1*mat(J,K)-s1*mat(K,K), c1*mat(J,J) - s1 * mat(K,J)); } template static void CalcEulerAngles( EulerAngles& res, const typename EulerAngles::Matrix3& mat) { CalcEulerAngles(res, mat, false, false, false); } template< bool PositiveRangeAlpha, bool PositiveRangeBeta, bool PositiveRangeGamma, typename Scalar> static void CalcEulerAngles( EulerAngles& res, const typename EulerAngles::Matrix3& mat) { CalcEulerAngles(res, mat, PositiveRangeAlpha, PositiveRangeBeta, PositiveRangeGamma); } template static void CalcEulerAngles( EulerAngles& res, const typename EulerAngles::Matrix3& mat, bool PositiveRangeAlpha, bool PositiveRangeBeta, bool PositiveRangeGamma) { CalcEulerAngles_imp( res.angles(), mat, typename internal::conditional::type()); if (IsAlphaOpposite == IsOdd) res.alpha() = -res.alpha(); if (IsBetaOpposite == IsOdd) res.beta() = -res.beta(); if (IsGammaOpposite == IsOdd) res.gamma() = -res.gamma(); // Saturate results to the requested range if (PositiveRangeAlpha && (res.alpha() < 0)) res.alpha() += Scalar(2 * EIGEN_PI); if (PositiveRangeBeta && (res.beta() < 0)) res.beta() += Scalar(2 * EIGEN_PI); if (PositiveRangeGamma && (res.gamma() < 0)) res.gamma() += Scalar(2 * EIGEN_PI); } template friend class Eigen::EulerAngles; }; #define EIGEN_EULER_SYSTEM_TYPEDEF(A, B, C) \ /** \ingroup EulerAngles_Module */ \ typedef EulerSystem EulerSystem##A##B##C; EIGEN_EULER_SYSTEM_TYPEDEF(X,Y,Z) EIGEN_EULER_SYSTEM_TYPEDEF(X,Y,X) EIGEN_EULER_SYSTEM_TYPEDEF(X,Z,Y) EIGEN_EULER_SYSTEM_TYPEDEF(X,Z,X) EIGEN_EULER_SYSTEM_TYPEDEF(Y,Z,X) EIGEN_EULER_SYSTEM_TYPEDEF(Y,Z,Y) EIGEN_EULER_SYSTEM_TYPEDEF(Y,X,Z) EIGEN_EULER_SYSTEM_TYPEDEF(Y,X,Y) EIGEN_EULER_SYSTEM_TYPEDEF(Z,X,Y) EIGEN_EULER_SYSTEM_TYPEDEF(Z,X,Z) EIGEN_EULER_SYSTEM_TYPEDEF(Z,Y,X) EIGEN_EULER_SYSTEM_TYPEDEF(Z,Y,Z) } #endif // EIGEN_EULERSYSTEM_H RcppEigen/inst/include/unsupported/Eigen/src/Skyline/0000755000176200001440000000000013100733213022343 
5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineProduct.h0000644000176200001440000002514513100733213025502 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SKYLINEPRODUCT_H #define EIGEN_SKYLINEPRODUCT_H namespace Eigen { template struct SkylineProductReturnType { typedef const typename internal::nested_eval::type LhsNested; typedef const typename internal::nested_eval::type RhsNested; typedef SkylineProduct Type; }; template struct internal::traits > { // clean the nested types: typedef typename internal::remove_all::type _LhsNested; typedef typename internal::remove_all::type _RhsNested; typedef typename _LhsNested::Scalar Scalar; enum { LhsCoeffReadCost = _LhsNested::CoeffReadCost, RhsCoeffReadCost = _RhsNested::CoeffReadCost, LhsFlags = _LhsNested::Flags, RhsFlags = _RhsNested::Flags, RowsAtCompileTime = _LhsNested::RowsAtCompileTime, ColsAtCompileTime = _RhsNested::ColsAtCompileTime, InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime), MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime, MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime, EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit), ResultIsSkyline = ProductMode == SkylineTimeSkylineProduct, RemovedBits = ~((EvalToRowMajor ? 0 : RowMajorBit) | (ResultIsSkyline ? 0 : SkylineBit)), Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) | EvalBeforeAssigningBit | EvalBeforeNestingBit, CoeffReadCost = HugeCost }; typedef typename internal::conditional >, MatrixBase > >::type Base; }; namespace internal { template class SkylineProduct : no_assignment_operator, public traits >::Base { public: EIGEN_GENERIC_PUBLIC_INTERFACE(SkylineProduct) private: typedef typename traits::_LhsNested _LhsNested; typedef typename traits::_RhsNested _RhsNested; public: template EIGEN_STRONG_INLINE SkylineProduct(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { eigen_assert(lhs.cols() == rhs.rows()); enum { ProductIsValid = _LhsNested::ColsAtCompileTime == Dynamic || _RhsNested::RowsAtCompileTime == Dynamic || int(_LhsNested::ColsAtCompileTime) == int(_RhsNested::RowsAtCompileTime), AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime, SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested, _RhsNested) }; // note to the lost user: // * for a dot product use: v1.dot(v2) // * for a coeff-wise product use: v1.cwise()*v2 EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) } EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } protected: LhsNested m_lhs; RhsNested m_rhs; }; // dense = skyline * dense // Note that here we force no inlining and 
separate the setZero() because GCC messes up otherwise template EIGEN_DONT_INLINE void skyline_row_major_time_dense_product(const Lhs& lhs, const Rhs& rhs, Dest& dst) { typedef typename remove_all::type _Lhs; typedef typename remove_all::type _Rhs; typedef typename traits::Scalar Scalar; enum { LhsIsRowMajor = (_Lhs::Flags & RowMajorBit) == RowMajorBit, LhsIsSelfAdjoint = (_Lhs::Flags & SelfAdjointBit) == SelfAdjointBit, ProcessFirstHalf = LhsIsSelfAdjoint && (((_Lhs::Flags & (UpperTriangularBit | LowerTriangularBit)) == 0) || ((_Lhs::Flags & UpperTriangularBit) && !LhsIsRowMajor) || ((_Lhs::Flags & LowerTriangularBit) && LhsIsRowMajor)), ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf) }; //Use matrix diagonal part <- Improvement : use inner iterator on dense matrix. for (Index col = 0; col < rhs.cols(); col++) { for (Index row = 0; row < lhs.rows(); row++) { dst(row, col) = lhs.coeffDiag(row) * rhs(row, col); } } //Use matrix lower triangular part for (Index row = 0; row < lhs.rows(); row++) { typename _Lhs::InnerLowerIterator lIt(lhs, row); const Index stop = lIt.col() + lIt.size(); for (Index col = 0; col < rhs.cols(); col++) { Index k = lIt.col(); Scalar tmp = 0; while (k < stop) { tmp += lIt.value() * rhs(k++, col); ++lIt; } dst(row, col) += tmp; lIt += -lIt.size(); } } //Use matrix upper triangular part for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) { typename _Lhs::InnerUpperIterator uIt(lhs, lhscol); const Index stop = uIt.size() + uIt.row(); for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) { const Scalar rhsCoeff = rhs.coeff(lhscol, rhscol); Index k = uIt.row(); while (k < stop) { dst(k++, rhscol) += uIt.value() * rhsCoeff; ++uIt; } uIt += -uIt.size(); } } } template EIGEN_DONT_INLINE void skyline_col_major_time_dense_product(const Lhs& lhs, const Rhs& rhs, Dest& dst) { typedef typename remove_all::type _Lhs; typedef typename remove_all::type _Rhs; typedef typename traits::Scalar Scalar; enum { LhsIsRowMajor = (_Lhs::Flags & RowMajorBit) == RowMajorBit, LhsIsSelfAdjoint = (_Lhs::Flags & SelfAdjointBit) == SelfAdjointBit, ProcessFirstHalf = LhsIsSelfAdjoint && (((_Lhs::Flags & (UpperTriangularBit | LowerTriangularBit)) == 0) || ((_Lhs::Flags & UpperTriangularBit) && !LhsIsRowMajor) || ((_Lhs::Flags & LowerTriangularBit) && LhsIsRowMajor)), ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf) }; //Use matrix diagonal part <- Improvement : use inner iterator on dense matrix. 
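    // Clarifying note (added; not in the original source): the product is accumulated
    // in three passes. The diagonal pass below initialises dst, and the two triangular
    // passes that follow add the contributions of the packed upper and lower skyline
    // entries, so overall dst = (D + U + L) * rhs, with D, U and L the parts stored by
    // the skyline format.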
for (Index col = 0; col < rhs.cols(); col++) { for (Index row = 0; row < lhs.rows(); row++) { dst(row, col) = lhs.coeffDiag(row) * rhs(row, col); } } //Use matrix upper triangular part for (Index row = 0; row < lhs.rows(); row++) { typename _Lhs::InnerUpperIterator uIt(lhs, row); const Index stop = uIt.col() + uIt.size(); for (Index col = 0; col < rhs.cols(); col++) { Index k = uIt.col(); Scalar tmp = 0; while (k < stop) { tmp += uIt.value() * rhs(k++, col); ++uIt; } dst(row, col) += tmp; uIt += -uIt.size(); } } //Use matrix lower triangular part for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) { typename _Lhs::InnerLowerIterator lIt(lhs, lhscol); const Index stop = lIt.size() + lIt.row(); for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) { const Scalar rhsCoeff = rhs.coeff(lhscol, rhscol); Index k = lIt.row(); while (k < stop) { dst(k++, rhscol) += lIt.value() * rhsCoeff; ++lIt; } lIt += -lIt.size(); } } } template::Flags&RowMajorBit> struct skyline_product_selector; template struct skyline_product_selector { typedef typename traits::type>::Scalar Scalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType & res) { skyline_row_major_time_dense_product (lhs, rhs, res); } }; template struct skyline_product_selector { typedef typename traits::type>::Scalar Scalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType & res) { skyline_col_major_time_dense_product (lhs, rhs, res); } }; } // end namespace internal // template // template // Derived & MatrixBase::lazyAssign(const SkylineProduct& product) { // typedef typename internal::remove_all::type _Lhs; // internal::skyline_product_selector::type, // typename internal::remove_all::type, // Derived>::run(product.lhs(), product.rhs(), derived()); // // return derived(); // } // skyline * dense template template EIGEN_STRONG_INLINE const typename SkylineProductReturnType::Type SkylineMatrixBase::operator*(const MatrixBase &other) const { return typename SkylineProductReturnType::Type(derived(), other.derived()); } } // end namespace Eigen #endif // EIGEN_SKYLINEPRODUCT_H RcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h0000644000176200001440000002613612253717461025716 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SKYLINEINPLACELU_H #define EIGEN_SKYLINEINPLACELU_H namespace Eigen { /** \ingroup Skyline_Module * * \class SkylineInplaceLU * * \brief Inplace LU decomposition of a skyline matrix and associated features * * \param MatrixType the type of the matrix of which we are computing the LU factorization * */ template class SkylineInplaceLU { protected: typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef typename NumTraits::Real RealScalar; public: /** Creates a LU object and compute the respective factorization of \a matrix using * flags \a flags. */ SkylineInplaceLU(MatrixType& matrix, int flags = 0) : /*m_matrix(matrix.rows(), matrix.cols()),*/ m_flags(flags), m_status(0), m_lu(matrix) { m_precision = RealScalar(0.1) * Eigen::dummy_precision (); m_lu.IsRowMajor ? computeRowMajor() : compute(); } /** Sets the relative threshold value used to prune zero coefficients during the decomposition. 
* * Setting a value greater than zero speeds up computation, and yields to an imcomplete * factorization with fewer non zero coefficients. Such approximate factors are especially * useful to initialize an iterative solver. * * Note that the exact meaning of this parameter might depends on the actual * backend. Moreover, not all backends support this feature. * * \sa precision() */ void setPrecision(RealScalar v) { m_precision = v; } /** \returns the current precision. * * \sa setPrecision() */ RealScalar precision() const { return m_precision; } /** Sets the flags. Possible values are: * - CompleteFactorization * - IncompleteFactorization * - MemoryEfficient * - one of the ordering methods * - etc... * * \sa flags() */ void setFlags(int f) { m_flags = f; } /** \returns the current flags */ int flags() const { return m_flags; } void setOrderingMethod(int m) { m_flags = m; } int orderingMethod() const { return m_flags; } /** Computes/re-computes the LU factorization */ void compute(); void computeRowMajor(); /** \returns the lower triangular matrix L */ //inline const MatrixType& matrixL() const { return m_matrixL; } /** \returns the upper triangular matrix U */ //inline const MatrixType& matrixU() const { return m_matrixU; } template bool solve(const MatrixBase &b, MatrixBase* x, const int transposed = 0) const; /** \returns true if the factorization succeeded */ inline bool succeeded(void) const { return m_succeeded; } protected: RealScalar m_precision; int m_flags; mutable int m_status; bool m_succeeded; MatrixType& m_lu; }; /** Computes / recomputes the in place LU decomposition of the SkylineInplaceLU. * using the default algorithm. */ template //template void SkylineInplaceLU::compute() { const size_t rows = m_lu.rows(); const size_t cols = m_lu.cols(); eigen_assert(rows == cols && "We do not (yet) support rectangular LU."); eigen_assert(!m_lu.IsRowMajor && "LU decomposition does not work with rowMajor Storage"); for (Index row = 0; row < rows; row++) { const double pivot = m_lu.coeffDiag(row); //Lower matrix Columns update const Index& col = row; for (typename MatrixType::InnerLowerIterator lIt(m_lu, col); lIt; ++lIt) { lIt.valueRef() /= pivot; } //Upper matrix update -> contiguous memory access typename MatrixType::InnerLowerIterator lIt(m_lu, col); for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) { typename MatrixType::InnerUpperIterator uItPivot(m_lu, row); typename MatrixType::InnerUpperIterator uIt(m_lu, rrow); const double coef = lIt.value(); uItPivot += (rrow - row - 1); //update upper part -> contiguous memory access for (++uItPivot; uIt && uItPivot;) { uIt.valueRef() -= uItPivot.value() * coef; ++uIt; ++uItPivot; } ++lIt; } //Upper matrix update -> non contiguous memory access typename MatrixType::InnerLowerIterator lIt3(m_lu, col); for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) { typename MatrixType::InnerUpperIterator uItPivot(m_lu, row); const double coef = lIt3.value(); //update lower part -> non contiguous memory access for (Index i = 0; i < rrow - row - 1; i++) { m_lu.coeffRefLower(rrow, row + i + 1) -= uItPivot.value() * coef; ++uItPivot; } ++lIt3; } //update diag -> contiguous typename MatrixType::InnerLowerIterator lIt2(m_lu, col); for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) { typename MatrixType::InnerUpperIterator uItPivot(m_lu, row); typename MatrixType::InnerUpperIterator uIt(m_lu, rrow); const double coef = lIt2.value(); uItPivot += (rrow - row - 1); m_lu.coeffRefDiag(rrow) -= uItPivot.value() * coef; ++lIt2; } } } template void 
SkylineInplaceLU::computeRowMajor() { const size_t rows = m_lu.rows(); const size_t cols = m_lu.cols(); eigen_assert(rows == cols && "We do not (yet) support rectangular LU."); eigen_assert(m_lu.IsRowMajor && "You're trying to apply rowMajor decomposition on a ColMajor matrix !"); for (Index row = 0; row < rows; row++) { typename MatrixType::InnerLowerIterator llIt(m_lu, row); for (Index col = llIt.col(); col < row; col++) { if (m_lu.coeffExistLower(row, col)) { const double diag = m_lu.coeffDiag(col); typename MatrixType::InnerLowerIterator lIt(m_lu, row); typename MatrixType::InnerUpperIterator uIt(m_lu, col); const Index offset = lIt.col() - uIt.row(); Index stop = offset > 0 ? col - lIt.col() : col - uIt.row(); //#define VECTORIZE #ifdef VECTORIZE Map rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop); Map colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop); Scalar newCoeff = m_lu.coeffLower(row, col) - rowVal.dot(colVal); #else if (offset > 0) //Skip zero value of lIt uIt += offset; else //Skip zero values of uIt lIt += -offset; Scalar newCoeff = m_lu.coeffLower(row, col); for (Index k = 0; k < stop; ++k) { const Scalar tmp = newCoeff; newCoeff = tmp - lIt.value() * uIt.value(); ++lIt; ++uIt; } #endif m_lu.coeffRefLower(row, col) = newCoeff / diag; } } //Upper matrix update const Index col = row; typename MatrixType::InnerUpperIterator uuIt(m_lu, col); for (Index rrow = uuIt.row(); rrow < col; rrow++) { typename MatrixType::InnerLowerIterator lIt(m_lu, rrow); typename MatrixType::InnerUpperIterator uIt(m_lu, col); const Index offset = lIt.col() - uIt.row(); Index stop = offset > 0 ? rrow - lIt.col() : rrow - uIt.row(); #ifdef VECTORIZE Map rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop); Map colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop); Scalar newCoeff = m_lu.coeffUpper(rrow, col) - rowVal.dot(colVal); #else if (offset > 0) //Skip zero value of lIt uIt += offset; else //Skip zero values of uIt lIt += -offset; Scalar newCoeff = m_lu.coeffUpper(rrow, col); for (Index k = 0; k < stop; ++k) { const Scalar tmp = newCoeff; newCoeff = tmp - lIt.value() * uIt.value(); ++lIt; ++uIt; } #endif m_lu.coeffRefUpper(rrow, col) = newCoeff; } //Diag matrix update typename MatrixType::InnerLowerIterator lIt(m_lu, row); typename MatrixType::InnerUpperIterator uIt(m_lu, row); const Index offset = lIt.col() - uIt.row(); Index stop = offset > 0 ? lIt.size() : uIt.size(); #ifdef VECTORIZE Map rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop); Map colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop); Scalar newCoeff = m_lu.coeffDiag(row) - rowVal.dot(colVal); #else if (offset > 0) //Skip zero value of lIt uIt += offset; else //Skip zero values of uIt lIt += -offset; Scalar newCoeff = m_lu.coeffDiag(row); for (Index k = 0; k < stop; ++k) { const Scalar tmp = newCoeff; newCoeff = tmp - lIt.value() * uIt.value(); ++lIt; ++uIt; } #endif m_lu.coeffRefDiag(row) = newCoeff; } } /** Computes *x = U^-1 L^-1 b * * If \a transpose is set to SvTranspose or SvAdjoint, the solution * of the transposed/adjoint system is computed instead. * * Not all backends implement the solution of the transposed or * adjoint system. 
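 *
 * A hypothetical usage sketch (added for illustration; assembly of the matrix and
 * right-hand side is elided, and the module header is assumed to be
 * <unsupported/Eigen/Skyline>):
 * \code
 * typedef Eigen::SkylineMatrix<double> Mat;
 * Mat A;                                  // ... assemble the skyline matrix ...
 * Eigen::VectorXd b, x;                   // ... right-hand side and solution, sized like A ...
 * Eigen::SkylineInplaceLU<Mat> lu(A);     // factors A in place on construction
 * lu.solve(b, &x);                        // overwrites x with the solution of A x = b
 * \endcode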
*/ template template bool SkylineInplaceLU::solve(const MatrixBase &b, MatrixBase* x, const int transposed) const { const size_t rows = m_lu.rows(); const size_t cols = m_lu.cols(); for (Index row = 0; row < rows; row++) { x->coeffRef(row) = b.coeff(row); Scalar newVal = x->coeff(row); typename MatrixType::InnerLowerIterator lIt(m_lu, row); Index col = lIt.col(); while (lIt.col() < row) { newVal -= x->coeff(col++) * lIt.value(); ++lIt; } x->coeffRef(row) = newVal; } for (Index col = rows - 1; col > 0; col--) { x->coeffRef(col) = x->coeff(col) / m_lu.coeffDiag(col); const Scalar x_col = x->coeff(col); typename MatrixType::InnerUpperIterator uIt(m_lu, col); uIt += uIt.size()-1; while (uIt) { x->coeffRef(uIt.row()) -= x_col * uIt.value(); //TODO : introduce --operator uIt += -1; } } x->coeffRef(0) = x->coeff(0) / m_lu.coeffDiag(0); return true; } } // end namespace Eigen #endif // EIGEN_SKYLINELU_H RcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h0000644000176200001440000001710112253717461026131 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SKYLINEMATRIXBASE_H #define EIGEN_SKYLINEMATRIXBASE_H #include "SkylineUtil.h" namespace Eigen { /** \ingroup Skyline_Module * * \class SkylineMatrixBase * * \brief Base class of any skyline matrices or skyline expressions * * \param Derived * */ template class SkylineMatrixBase : public EigenBase { public: typedef typename internal::traits::Scalar Scalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::index::type Index; enum { RowsAtCompileTime = internal::traits::RowsAtCompileTime, /**< The number of rows at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ ColsAtCompileTime = internal::traits::ColsAtCompileTime, /**< The number of columns at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, internal::traits::ColsAtCompileTime>::ret), /**< This is equal to the number of coefficients, i.e. the number of * rows times the number of columns, or to \a Dynamic if this is not * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, MaxSizeAtCompileTime = (internal::size_at_compile_time::ret), IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1, /**< This is set to true if either the number of rows or the number of * columns is known at compile-time to be equal to 1. Indeed, in that case, * we are dealing with a column-vector (if there is only one column) or with * a row-vector (if there is only one row). */ Flags = internal::traits::Flags, /**< This stores expression \ref flags flags which may or may not be inherited by new expressions * constructed from this one. See the \ref flags "list of flags". 
*/ CoeffReadCost = internal::traits::CoeffReadCost, /**< This is a rough measure of how expensive it is to read one coefficient from * this expression. */ IsRowMajor = Flags & RowMajorBit ? 1 : 0 }; #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is the "real scalar" type; if the \a Scalar type is already real numbers * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If * \a Scalar is \a std::complex then RealScalar is \a T. * * \sa class NumTraits */ typedef typename NumTraits::Real RealScalar; /** type of the equivalent square matrix */ typedef Matrix SquareMatrixType; inline const Derived& derived() const { return *static_cast (this); } inline Derived& derived() { return *static_cast (this); } inline Derived& const_cast_derived() const { return *static_cast (const_cast (this)); } #endif // not EIGEN_PARSED_BY_DOXYGEN /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ inline Index rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ inline Index cols() const { return derived().cols(); } /** \returns the number of coefficients, which is \a rows()*cols(). * \sa rows(), cols(), SizeAtCompileTime. */ inline Index size() const { return rows() * cols(); } /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ inline Index nonZeros() const { return derived().nonZeros(); } /** \returns the size of the storage major dimension, * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ Index outerSize() const { return (int(Flags) & RowMajorBit) ? this->rows() : this->cols(); } /** \returns the size of the inner dimension according to the storage order, * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ Index innerSize() const { return (int(Flags) & RowMajorBit) ? this->cols() : this->rows(); } bool isRValue() const { return m_isRValue; } Derived& markAsRValue() { m_isRValue = true; return derived(); } SkylineMatrixBase() : m_isRValue(false) { /* TODO check flags */ } inline Derived & operator=(const Derived& other) { this->operator= (other); return derived(); } template inline void assignGeneric(const OtherDerived& other) { derived().resize(other.rows(), other.cols()); for (Index row = 0; row < rows(); row++) for (Index col = 0; col < cols(); col++) { if (other.coeff(row, col) != Scalar(0)) derived().insert(row, col) = other.coeff(row, col); } derived().finalize(); } template inline Derived & operator=(const SkylineMatrixBase& other) { //TODO } template inline Derived & operator=(const SkylineProduct& product); friend std::ostream & operator <<(std::ostream & s, const SkylineMatrixBase& m) { s << m.derived(); return s; } template const typename SkylineProductReturnType::Type operator*(const MatrixBase &other) const; /** \internal use operator= */ template void evalTo(MatrixBase& dst) const { dst.setZero(); for (Index i = 0; i < rows(); i++) for (Index j = 0; j < rows(); j++) dst(i, j) = derived().coeff(i, j); } Matrix toDense() const { return derived(); } /** \returns the matrix or vector obtained by evaluating this expression. * * Notice that in the case of a plain matrix or vector (not an expression) this function just returns * a const reference, in order to avoid a useless copy. 
*/ EIGEN_STRONG_INLINE const typename internal::eval::type eval() const { return typename internal::eval::type(derived()); } protected: bool m_isRValue; }; } // end namespace Eigen #endif // EIGEN_SkylineMatrixBase_H RcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineStorage.h0000644000176200001440000001744112253717461025505 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SKYLINE_STORAGE_H #define EIGEN_SKYLINE_STORAGE_H namespace Eigen { /** Stores a skyline set of values in three structures : * The diagonal elements * The upper elements * The lower elements * */ template class SkylineStorage { typedef typename NumTraits::Real RealScalar; typedef SparseIndex Index; public: SkylineStorage() : m_diag(0), m_lower(0), m_upper(0), m_lowerProfile(0), m_upperProfile(0), m_diagSize(0), m_upperSize(0), m_lowerSize(0), m_upperProfileSize(0), m_lowerProfileSize(0), m_allocatedSize(0) { } SkylineStorage(const SkylineStorage& other) : m_diag(0), m_lower(0), m_upper(0), m_lowerProfile(0), m_upperProfile(0), m_diagSize(0), m_upperSize(0), m_lowerSize(0), m_upperProfileSize(0), m_lowerProfileSize(0), m_allocatedSize(0) { *this = other; } SkylineStorage & operator=(const SkylineStorage& other) { resize(other.diagSize(), other.m_upperProfileSize, other.m_lowerProfileSize, other.upperSize(), other.lowerSize()); memcpy(m_diag, other.m_diag, m_diagSize * sizeof (Scalar)); memcpy(m_upper, other.m_upper, other.upperSize() * sizeof (Scalar)); memcpy(m_lower, other.m_lower, other.lowerSize() * sizeof (Scalar)); memcpy(m_upperProfile, other.m_upperProfile, m_upperProfileSize * sizeof (Index)); memcpy(m_lowerProfile, other.m_lowerProfile, m_lowerProfileSize * sizeof (Index)); return *this; } void swap(SkylineStorage& other) { std::swap(m_diag, other.m_diag); std::swap(m_upper, other.m_upper); std::swap(m_lower, other.m_lower); std::swap(m_upperProfile, other.m_upperProfile); std::swap(m_lowerProfile, other.m_lowerProfile); std::swap(m_diagSize, other.m_diagSize); std::swap(m_upperSize, other.m_upperSize); std::swap(m_lowerSize, other.m_lowerSize); std::swap(m_allocatedSize, other.m_allocatedSize); } ~SkylineStorage() { delete[] m_diag; delete[] m_upper; if (m_upper != m_lower) delete[] m_lower; delete[] m_upperProfile; delete[] m_lowerProfile; } void reserve(Index size, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize) { Index newAllocatedSize = size + upperSize + lowerSize; if (newAllocatedSize > m_allocatedSize) reallocate(size, upperProfileSize, lowerProfileSize, upperSize, lowerSize); } void squeeze() { if (m_allocatedSize > m_diagSize + m_upperSize + m_lowerSize) reallocate(m_diagSize, m_upperProfileSize, m_lowerProfileSize, m_upperSize, m_lowerSize); } void resize(Index diagSize, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize, float reserveSizeFactor = 0) { if (m_allocatedSize < diagSize + upperSize + lowerSize) reallocate(diagSize, upperProfileSize, lowerProfileSize, upperSize + Index(reserveSizeFactor * upperSize), lowerSize + Index(reserveSizeFactor * lowerSize)); m_diagSize = diagSize; m_upperSize = upperSize; m_lowerSize = lowerSize; m_upperProfileSize = upperProfileSize; m_lowerProfileSize = lowerProfileSize; } 
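    // Clarifying note (added here; not part of the original source): the five buffers
    // managed by this class are m_diag (one value per diagonal entry), m_upper and
    // m_lower (the packed entries above and below the diagonal), and m_upperProfile
    // and m_lowerProfile (the per row/column extents of those skylines). resize()
    // above only adjusts the sizes and, if necessary, the capacity; it does not zero
    // the newly exposed entries -- reset() below does that.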
inline Index diagSize() const { return m_diagSize; } inline Index upperSize() const { return m_upperSize; } inline Index lowerSize() const { return m_lowerSize; } inline Index upperProfileSize() const { return m_upperProfileSize; } inline Index lowerProfileSize() const { return m_lowerProfileSize; } inline Index allocatedSize() const { return m_allocatedSize; } inline void clear() { m_diagSize = 0; } inline Scalar& diag(Index i) { return m_diag[i]; } inline const Scalar& diag(Index i) const { return m_diag[i]; } inline Scalar& upper(Index i) { return m_upper[i]; } inline const Scalar& upper(Index i) const { return m_upper[i]; } inline Scalar& lower(Index i) { return m_lower[i]; } inline const Scalar& lower(Index i) const { return m_lower[i]; } inline Index& upperProfile(Index i) { return m_upperProfile[i]; } inline const Index& upperProfile(Index i) const { return m_upperProfile[i]; } inline Index& lowerProfile(Index i) { return m_lowerProfile[i]; } inline const Index& lowerProfile(Index i) const { return m_lowerProfile[i]; } static SkylineStorage Map(Index* upperProfile, Index* lowerProfile, Scalar* diag, Scalar* upper, Scalar* lower, Index size, Index upperSize, Index lowerSize) { SkylineStorage res; res.m_upperProfile = upperProfile; res.m_lowerProfile = lowerProfile; res.m_diag = diag; res.m_upper = upper; res.m_lower = lower; res.m_allocatedSize = res.m_diagSize = size; res.m_upperSize = upperSize; res.m_lowerSize = lowerSize; return res; } inline void reset() { memset(m_diag, 0, m_diagSize * sizeof (Scalar)); memset(m_upper, 0, m_upperSize * sizeof (Scalar)); memset(m_lower, 0, m_lowerSize * sizeof (Scalar)); memset(m_upperProfile, 0, m_diagSize * sizeof (Index)); memset(m_lowerProfile, 0, m_diagSize * sizeof (Index)); } void prune(Scalar reference, RealScalar epsilon = dummy_precision()) { //TODO } protected: inline void reallocate(Index diagSize, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize) { Scalar* diag = new Scalar[diagSize]; Scalar* upper = new Scalar[upperSize]; Scalar* lower = new Scalar[lowerSize]; Index* upperProfile = new Index[upperProfileSize]; Index* lowerProfile = new Index[lowerProfileSize]; Index copyDiagSize = (std::min)(diagSize, m_diagSize); Index copyUpperSize = (std::min)(upperSize, m_upperSize); Index copyLowerSize = (std::min)(lowerSize, m_lowerSize); Index copyUpperProfileSize = (std::min)(upperProfileSize, m_upperProfileSize); Index copyLowerProfileSize = (std::min)(lowerProfileSize, m_lowerProfileSize); // copy memcpy(diag, m_diag, copyDiagSize * sizeof (Scalar)); memcpy(upper, m_upper, copyUpperSize * sizeof (Scalar)); memcpy(lower, m_lower, copyLowerSize * sizeof (Scalar)); memcpy(upperProfile, m_upperProfile, copyUpperProfileSize * sizeof (Index)); memcpy(lowerProfile, m_lowerProfile, copyLowerProfileSize * sizeof (Index)); // delete old stuff delete[] m_diag; delete[] m_upper; delete[] m_lower; delete[] m_upperProfile; delete[] m_lowerProfile; m_diag = diag; m_upper = upper; m_lower = lower; m_upperProfile = upperProfile; m_lowerProfile = lowerProfile; m_allocatedSize = diagSize + upperSize + lowerSize; m_upperSize = upperSize; m_lowerSize = lowerSize; } public: Scalar* m_diag; Scalar* m_upper; Scalar* m_lower; Index* m_upperProfile; Index* m_lowerProfile; Index m_diagSize; Index m_upperSize; Index m_lowerSize; Index m_upperProfileSize; Index m_lowerProfileSize; Index m_allocatedSize; }; } // end namespace Eigen #endif // EIGEN_COMPRESSED_STORAGE_H 
RcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineMatrix.h0000644000176200001440000007453112253717461025350 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SKYLINEMATRIX_H #define EIGEN_SKYLINEMATRIX_H #include "SkylineStorage.h" #include "SkylineMatrixBase.h" namespace Eigen { /** \ingroup Skyline_Module * * \class SkylineMatrix * * \brief The main skyline matrix class * * This class implements a skyline matrix using the very uncommon storage * scheme. * * \param _Scalar the scalar type, i.e. the type of the coefficients * \param _Options Union of bit flags controlling the storage scheme. Currently the only possibility * is RowMajor. The default is 0 which means column-major. * * */ namespace internal { template struct traits > { typedef _Scalar Scalar; typedef Sparse StorageKind; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = Dynamic, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic, Flags = SkylineBit | _Options, CoeffReadCost = NumTraits::ReadCost, }; }; } template class SkylineMatrix : public SkylineMatrixBase > { public: EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(SkylineMatrix) EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(SkylineMatrix, +=) EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(SkylineMatrix, -=) using Base::IsRowMajor; protected: typedef SkylineMatrix TransposedSkylineMatrix; Index m_outerSize; Index m_innerSize; public: Index* m_colStartIndex; Index* m_rowStartIndex; SkylineStorage m_data; public: inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } inline Index innerSize() const { return m_innerSize; } inline Index outerSize() const { return m_outerSize; } inline Index upperNonZeros() const { return m_data.upperSize(); } inline Index lowerNonZeros() const { return m_data.lowerSize(); } inline Index upperNonZeros(Index j) const { return m_colStartIndex[j + 1] - m_colStartIndex[j]; } inline Index lowerNonZeros(Index j) const { return m_rowStartIndex[j + 1] - m_rowStartIndex[j]; } inline const Scalar* _diagPtr() const { return &m_data.diag(0); } inline Scalar* _diagPtr() { return &m_data.diag(0); } inline const Scalar* _upperPtr() const { return &m_data.upper(0); } inline Scalar* _upperPtr() { return &m_data.upper(0); } inline const Scalar* _lowerPtr() const { return &m_data.lower(0); } inline Scalar* _lowerPtr() { return &m_data.lower(0); } inline const Index* _upperProfilePtr() const { return &m_data.upperProfile(0); } inline Index* _upperProfilePtr() { return &m_data.upperProfile(0); } inline const Index* _lowerProfilePtr() const { return &m_data.lowerProfile(0); } inline Index* _lowerProfilePtr() { return &m_data.lowerProfile(0); } inline Scalar coeff(Index row, Index col) const { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); if (outer == inner) return this->m_data.diag(outer); if (IsRowMajor) { if (inner > outer) //upper matrix { const Index minOuterIndex = inner - m_data.upperProfile(inner); if (outer >= minOuterIndex) return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); else return Scalar(0); } if (inner < outer) //lower matrix { const Index minInnerIndex = outer - m_data.lowerProfile(outer); if (inner >= minInnerIndex) return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); else return Scalar(0); } return m_data.upper(m_colStartIndex[inner] + outer - inner); } else { if (outer > inner) //upper matrix { const Index maxOuterIndex = inner + m_data.upperProfile(inner); if (outer <= maxOuterIndex) return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); else return Scalar(0); } if (outer < inner) //lower matrix { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); if (inner <= maxInnerIndex) return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); else return Scalar(0); } } } inline Scalar& coeffRef(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); if (outer == inner) return this->m_data.diag(outer); if (IsRowMajor) { if (col > row) //upper matrix { const Index minOuterIndex = inner - m_data.upperProfile(inner); eigen_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); } if (col < row) //lower matrix { const Index minInnerIndex = outer - m_data.lowerProfile(outer); eigen_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); } } else { if (outer > inner) //upper matrix { const Index maxOuterIndex = inner + m_data.upperProfile(inner); eigen_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); } if (outer < inner) //lower matrix { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); eigen_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); } } } inline Scalar coeffDiag(Index idx) const { eigen_assert(idx < outerSize()); eigen_assert(idx < innerSize()); return this->m_data.diag(idx); } inline Scalar coeffLower(Index row, Index col) const { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minInnerIndex = outer - m_data.lowerProfile(outer); if (inner >= minInnerIndex) return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); else return Scalar(0); } else { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); if (inner <= maxInnerIndex) return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); else return Scalar(0); } } inline Scalar coeffUpper(Index row, Index col) const { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minOuterIndex = inner - m_data.upperProfile(inner); if (outer >= minOuterIndex) return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); else return Scalar(0); } else { const Index maxOuterIndex = inner + m_data.upperProfile(inner); if (outer <= maxOuterIndex) return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); else return Scalar(0); } } inline Scalar& coeffRefDiag(Index idx) { eigen_assert(idx < outerSize()); eigen_assert(idx < innerSize()); return this->m_data.diag(idx); } inline Scalar& coeffRefLower(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minInnerIndex = outer - m_data.lowerProfile(outer); eigen_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); } else { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); eigen_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); } } inline bool coeffExistLower(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minInnerIndex = outer - m_data.lowerProfile(outer); return inner >= minInnerIndex; } else { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); return inner <= maxInnerIndex; } } inline Scalar& coeffRefUpper(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minOuterIndex = inner - m_data.upperProfile(inner); eigen_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); } else { const Index maxOuterIndex = inner + m_data.upperProfile(inner); eigen_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); } } inline bool coeffExistUpper(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minOuterIndex = inner - m_data.upperProfile(inner); return outer >= minOuterIndex; } else { const Index maxOuterIndex = inner + m_data.upperProfile(inner); return outer <= maxOuterIndex; } } protected: public: class InnerUpperIterator; class InnerLowerIterator; class OuterUpperIterator; class OuterLowerIterator; /** Removes all non zeros */ inline void setZero() { m_data.clear(); memset(m_colStartIndex, 0, (m_outerSize + 1) * sizeof (Index)); memset(m_rowStartIndex, 0, (m_outerSize + 1) * sizeof (Index)); } /** \returns the number of non zero coefficients */ inline Index nonZeros() const { return m_data.diagSize() + m_data.upperSize() + m_data.lowerSize(); } /** Preallocates \a reserveSize non zeros */ inline void reserve(Index reserveSize, Index reserveUpperSize, Index reserveLowerSize) { m_data.reserve(reserveSize, reserveUpperSize, reserveLowerSize); } /** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col. * * \warning This function can be extremely slow if the non zero coefficients * are not inserted in a coherent order. * * After an insertion session, you should call the finalize() function. */ EIGEN_DONT_INLINE Scalar & insert(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); if (outer == inner) return m_data.diag(col); if (IsRowMajor) { if (outer < inner) //upper matrix { Index minOuterIndex = 0; minOuterIndex = inner - m_data.upperProfile(inner); if (outer < minOuterIndex) //The value does not yet exist { const Index previousProfile = m_data.upperProfile(inner); m_data.upperProfile(inner) = inner - outer; const Index bandIncrement = m_data.upperProfile(inner) - previousProfile; //shift data stored after this new one const Index stop = m_colStartIndex[cols()]; const Index start = m_colStartIndex[inner]; for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { m_data.upper(innerIdx + bandIncrement) = m_data.upper(innerIdx); } for (Index innerIdx = cols(); innerIdx > inner; innerIdx--) { m_colStartIndex[innerIdx] += bandIncrement; } //zeros new data memset(this->_upperPtr() + start, 0, (bandIncrement - 1) * sizeof (Scalar)); return m_data.upper(m_colStartIndex[inner]); } else { return m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); } } if (outer > inner) //lower matrix { const Index minInnerIndex = outer - m_data.lowerProfile(outer); if (inner < minInnerIndex) //The value does not yet exist { const Index previousProfile = m_data.lowerProfile(outer); m_data.lowerProfile(outer) = outer - inner; const Index bandIncrement = m_data.lowerProfile(outer) - previousProfile; //shift data stored after this new one const Index stop = m_rowStartIndex[rows()]; const Index start = m_rowStartIndex[outer]; for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { m_data.lower(innerIdx + bandIncrement) = m_data.lower(innerIdx); } for (Index innerIdx = rows(); innerIdx > outer; innerIdx--) { m_rowStartIndex[innerIdx] += bandIncrement; } //zeros new data memset(this->_lowerPtr() + start, 0, (bandIncrement - 1) * sizeof (Scalar)); return m_data.lower(m_rowStartIndex[outer]); } else { return m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); } } } else { if (outer > inner) //upper matrix { const Index maxOuterIndex = inner + 
m_data.upperProfile(inner); if (outer > maxOuterIndex) //The value does not yet exist { const Index previousProfile = m_data.upperProfile(inner); m_data.upperProfile(inner) = outer - inner; const Index bandIncrement = m_data.upperProfile(inner) - previousProfile; //shift data stored after this new one const Index stop = m_rowStartIndex[rows()]; const Index start = m_rowStartIndex[inner + 1]; for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { m_data.upper(innerIdx + bandIncrement) = m_data.upper(innerIdx); } for (Index innerIdx = inner + 1; innerIdx < outerSize() + 1; innerIdx++) { m_rowStartIndex[innerIdx] += bandIncrement; } memset(this->_upperPtr() + m_rowStartIndex[inner] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar)); return m_data.upper(m_rowStartIndex[inner] + m_data.upperProfile(inner)); } else { return m_data.upper(m_rowStartIndex[inner] + (outer - inner)); } } if (outer < inner) //lower matrix { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); if (inner > maxInnerIndex) //The value does not yet exist { const Index previousProfile = m_data.lowerProfile(outer); m_data.lowerProfile(outer) = inner - outer; const Index bandIncrement = m_data.lowerProfile(outer) - previousProfile; //shift data stored after this new one const Index stop = m_colStartIndex[cols()]; const Index start = m_colStartIndex[outer + 1]; for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { m_data.lower(innerIdx + bandIncrement) = m_data.lower(innerIdx); } for (Index innerIdx = outer + 1; innerIdx < outerSize() + 1; innerIdx++) { m_colStartIndex[innerIdx] += bandIncrement; } memset(this->_lowerPtr() + m_colStartIndex[outer] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar)); return m_data.lower(m_colStartIndex[outer] + m_data.lowerProfile(outer)); } else { return m_data.lower(m_colStartIndex[outer] + (inner - outer)); } } } } /** Must be called after inserting a set of non zero entries. 
*/ inline void finalize() { if (IsRowMajor) { if (rows() > cols()) m_data.resize(cols(), cols(), rows(), m_colStartIndex[cols()] + 1, m_rowStartIndex[rows()] + 1); else m_data.resize(rows(), cols(), rows(), m_colStartIndex[cols()] + 1, m_rowStartIndex[rows()] + 1); // eigen_assert(rows() == cols() && "memory reorganisatrion only works with suare matrix"); // // Scalar* newArray = new Scalar[m_colStartIndex[cols()] + 1 + m_rowStartIndex[rows()] + 1]; // Index dataIdx = 0; // for (Index row = 0; row < rows(); row++) { // // const Index nbLowerElts = m_rowStartIndex[row + 1] - m_rowStartIndex[row]; // // std::cout << "nbLowerElts" << nbLowerElts << std::endl; // memcpy(newArray + dataIdx, m_data.m_lower + m_rowStartIndex[row], nbLowerElts * sizeof (Scalar)); // m_rowStartIndex[row] = dataIdx; // dataIdx += nbLowerElts; // // const Index nbUpperElts = m_colStartIndex[row + 1] - m_colStartIndex[row]; // memcpy(newArray + dataIdx, m_data.m_upper + m_colStartIndex[row], nbUpperElts * sizeof (Scalar)); // m_colStartIndex[row] = dataIdx; // dataIdx += nbUpperElts; // // // } // //todo : don't access m_data profile directly : add an accessor from SkylineMatrix // m_rowStartIndex[rows()] = m_rowStartIndex[rows()-1] + m_data.lowerProfile(rows()-1); // m_colStartIndex[cols()] = m_colStartIndex[cols()-1] + m_data.upperProfile(cols()-1); // // delete[] m_data.m_lower; // delete[] m_data.m_upper; // // m_data.m_lower = newArray; // m_data.m_upper = newArray; } else { if (rows() > cols()) m_data.resize(cols(), rows(), cols(), m_rowStartIndex[cols()] + 1, m_colStartIndex[cols()] + 1); else m_data.resize(rows(), rows(), cols(), m_rowStartIndex[rows()] + 1, m_colStartIndex[rows()] + 1); } } inline void squeeze() { finalize(); m_data.squeeze(); } void prune(Scalar reference, RealScalar epsilon = dummy_precision ()) { //TODO } /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero * \sa resizeNonZeros(Index), reserve(), setZero() */ void resize(size_t rows, size_t cols) { const Index diagSize = rows > cols ? cols : rows; m_innerSize = IsRowMajor ? cols : rows; eigen_assert(rows == cols && "Skyline matrix must be square matrix"); if (diagSize % 2) { // diagSize is odd const Index k = (diagSize - 1) / 2; m_data.resize(diagSize, IsRowMajor ? cols : rows, IsRowMajor ? rows : cols, 2 * k * k + k + 1, 2 * k * k + k + 1); } else // diagSize is even { const Index k = diagSize / 2; m_data.resize(diagSize, IsRowMajor ? cols : rows, IsRowMajor ? 
rows : cols, 2 * k * k - k + 1, 2 * k * k - k + 1); } if (m_colStartIndex && m_rowStartIndex) { delete[] m_colStartIndex; delete[] m_rowStartIndex; } m_colStartIndex = new Index [cols + 1]; m_rowStartIndex = new Index [rows + 1]; m_outerSize = diagSize; m_data.reset(); m_data.clear(); m_outerSize = diagSize; memset(m_colStartIndex, 0, (cols + 1) * sizeof (Index)); memset(m_rowStartIndex, 0, (rows + 1) * sizeof (Index)); } void resizeNonZeros(Index size) { m_data.resize(size); } inline SkylineMatrix() : m_outerSize(-1), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { resize(0, 0); } inline SkylineMatrix(size_t rows, size_t cols) : m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { resize(rows, cols); } template inline SkylineMatrix(const SkylineMatrixBase& other) : m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { *this = other.derived(); } inline SkylineMatrix(const SkylineMatrix & other) : Base(), m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { *this = other.derived(); } inline void swap(SkylineMatrix & other) { //EIGEN_DBG_SKYLINE(std::cout << "SkylineMatrix:: swap\n"); std::swap(m_colStartIndex, other.m_colStartIndex); std::swap(m_rowStartIndex, other.m_rowStartIndex); std::swap(m_innerSize, other.m_innerSize); std::swap(m_outerSize, other.m_outerSize); m_data.swap(other.m_data); } inline SkylineMatrix & operator=(const SkylineMatrix & other) { std::cout << "SkylineMatrix& operator=(const SkylineMatrix& other)\n"; if (other.isRValue()) { swap(other.const_cast_derived()); } else { resize(other.rows(), other.cols()); memcpy(m_colStartIndex, other.m_colStartIndex, (m_outerSize + 1) * sizeof (Index)); memcpy(m_rowStartIndex, other.m_rowStartIndex, (m_outerSize + 1) * sizeof (Index)); m_data = other.m_data; } return *this; } template inline SkylineMatrix & operator=(const SkylineMatrixBase& other) { const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); if (needToTranspose) { // TODO // return *this; } else { // there is no special optimization return SkylineMatrixBase::operator=(other.derived()); } } friend std::ostream & operator <<(std::ostream & s, const SkylineMatrix & m) { EIGEN_DBG_SKYLINE( std::cout << "upper elements : " << std::endl; for (Index i = 0; i < m.m_data.upperSize(); i++) std::cout << m.m_data.upper(i) << "\t"; std::cout << std::endl; std::cout << "upper profile : " << std::endl; for (Index i = 0; i < m.m_data.upperProfileSize(); i++) std::cout << m.m_data.upperProfile(i) << "\t"; std::cout << std::endl; std::cout << "lower startIdx : " << std::endl; for (Index i = 0; i < m.m_data.upperProfileSize(); i++) std::cout << (IsRowMajor ? m.m_colStartIndex[i] : m.m_rowStartIndex[i]) << "\t"; std::cout << std::endl; std::cout << "lower elements : " << std::endl; for (Index i = 0; i < m.m_data.lowerSize(); i++) std::cout << m.m_data.lower(i) << "\t"; std::cout << std::endl; std::cout << "lower profile : " << std::endl; for (Index i = 0; i < m.m_data.lowerProfileSize(); i++) std::cout << m.m_data.lowerProfile(i) << "\t"; std::cout << std::endl; std::cout << "lower startIdx : " << std::endl; for (Index i = 0; i < m.m_data.lowerProfileSize(); i++) std::cout << (IsRowMajor ? 
m.m_rowStartIndex[i] : m.m_colStartIndex[i]) << "\t"; std::cout << std::endl; ); for (Index rowIdx = 0; rowIdx < m.rows(); rowIdx++) { for (Index colIdx = 0; colIdx < m.cols(); colIdx++) { s << m.coeff(rowIdx, colIdx) << "\t"; } s << std::endl; } return s; } /** Destructor */ inline ~SkylineMatrix() { delete[] m_colStartIndex; delete[] m_rowStartIndex; } /** Overloaded for performance */ Scalar sum() const; }; template class SkylineMatrix::InnerUpperIterator { public: InnerUpperIterator(const SkylineMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_id(_Options == RowMajor ? mat.m_colStartIndex[outer] : mat.m_rowStartIndex[outer] + 1), m_start(m_id), m_end(_Options == RowMajor ? mat.m_colStartIndex[outer + 1] : mat.m_rowStartIndex[outer + 1] + 1) { } inline InnerUpperIterator & operator++() { m_id++; return *this; } inline InnerUpperIterator & operator+=(Index shift) { m_id += shift; return *this; } inline Scalar value() const { return m_matrix.m_data.upper(m_id); } inline Scalar* valuePtr() { return const_cast (&(m_matrix.m_data.upper(m_id))); } inline Scalar& valueRef() { return const_cast (m_matrix.m_data.upper(m_id)); } inline Index index() const { return IsRowMajor ? m_outer - m_matrix.m_data.upperProfile(m_outer) + (m_id - m_start) : m_outer + (m_id - m_start) + 1; } inline Index row() const { return IsRowMajor ? index() : m_outer; } inline Index col() const { return IsRowMajor ? m_outer : index(); } inline size_t size() const { return m_matrix.m_data.upperProfile(m_outer); } inline operator bool() const { return (m_id < m_end) && (m_id >= m_start); } protected: const SkylineMatrix& m_matrix; const Index m_outer; Index m_id; const Index m_start; const Index m_end; }; template class SkylineMatrix::InnerLowerIterator { public: InnerLowerIterator(const SkylineMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_id(_Options == RowMajor ? mat.m_rowStartIndex[outer] : mat.m_colStartIndex[outer] + 1), m_start(m_id), m_end(_Options == RowMajor ? mat.m_rowStartIndex[outer + 1] : mat.m_colStartIndex[outer + 1] + 1) { } inline InnerLowerIterator & operator++() { m_id++; return *this; } inline InnerLowerIterator & operator+=(Index shift) { m_id += shift; return *this; } inline Scalar value() const { return m_matrix.m_data.lower(m_id); } inline Scalar* valuePtr() { return const_cast (&(m_matrix.m_data.lower(m_id))); } inline Scalar& valueRef() { return const_cast (m_matrix.m_data.lower(m_id)); } inline Index index() const { return IsRowMajor ? m_outer - m_matrix.m_data.lowerProfile(m_outer) + (m_id - m_start) : m_outer + (m_id - m_start) + 1; ; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? index() : m_outer; } inline size_t size() const { return m_matrix.m_data.lowerProfile(m_outer); } inline operator bool() const { return (m_id < m_end) && (m_id >= m_start); } protected: const SkylineMatrix& m_matrix; const Index m_outer; Index m_id; const Index m_start; const Index m_end; }; } // end namespace Eigen #endif // EIGEN_SkylineMatrix_H RcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineUtil.h0000644000176200001440000000612112253717461025007 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
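// A minimal usage sketch for the SkylineMatrix class defined in SkylineMatrix.h above,
// hedged and illustrative only (not part of Eigen): it assumes the unsupported module
// header <unsupported/Eigen/Skyline> and a square matrix, as required by resize().
// The pattern is insert() for each structurally non-zero entry, then finalize().
#if 0   // illustrative sketch, deliberately excluded from compilation
#include <unsupported/Eigen/Skyline>   // assumption: top-level header of this unsupported module
int main() {
    Eigen::SkylineMatrix<double> A(4, 4);   // skyline (profile) storage; must be square
    A.insert(0, 0) = 4.0;                   // diagonal entry
    A.insert(1, 0) = 1.0;                   // entry inside the lower profile
    A.insert(0, 1) = 2.0;                   // entry inside the upper profile
    A.finalize();                           // required after an insertion session
    double v = A.coeff(2, 0);               // reads return Scalar(0) outside the stored profile
    (void)v;
    return 0;
}
#endif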
#ifndef EIGEN_SKYLINEUTIL_H #define EIGEN_SKYLINEUTIL_H namespace Eigen { #ifdef NDEBUG #define EIGEN_DBG_SKYLINE(X) #else #define EIGEN_DBG_SKYLINE(X) X #endif const unsigned int SkylineBit = 0x1200; template class SkylineProduct; enum AdditionalProductEvaluationMode {SkylineTimeDenseProduct, SkylineTimeSkylineProduct, DenseTimeSkylineProduct}; enum {IsSkyline = SkylineBit}; #define EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \ template \ EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SkylineMatrixBase& other) \ { \ return Base::operator Op(other.derived()); \ } \ EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \ { \ return Base::operator Op(other); \ } #define EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \ template \ EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \ { \ return Base::operator Op(scalar); \ } #define EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \ EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \ EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) #define _EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \ typedef BaseClass Base; \ typedef typename Eigen::internal::traits::Scalar Scalar; \ typedef typename Eigen::NumTraits::Real RealScalar; \ typedef typename Eigen::internal::traits::StorageKind StorageKind; \ typedef typename Eigen::internal::index::type Index; \ enum { Flags = Eigen::internal::traits::Flags, }; #define EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived) \ _EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SkylineMatrixBase) template class SkylineMatrixBase; template class SkylineMatrix; template class DynamicSkylineMatrix; template class SkylineVector; template class MappedSkylineMatrix; namespace internal { template struct skyline_product_mode; template::value> struct SkylineProductReturnType; template class eval { typedef typename traits::Scalar _Scalar; enum { _Flags = traits::Flags }; public: typedef SkylineMatrix<_Scalar, _Flags> type; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_SKYLINEUTIL_H RcppEigen/inst/include/unsupported/Eigen/src/SpecialFunctions/0000755000176200001440000000000013100733213024176 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h0000644000176200001440000000522513100733213031566 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_SPECIALFUNCTIONS_PACKETMATH_H #define EIGEN_SPECIALFUNCTIONS_PACKETMATH_H namespace Eigen { namespace internal { /** \internal \returns the ln(|gamma(\a a)|) (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plgamma(const Packet& a) { using numext::lgamma; return lgamma(a); } /** \internal \returns the derivative of lgamma, psi(\a a) (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pdigamma(const Packet& a) { using numext::digamma; return digamma(a); } /** \internal \returns the zeta function of two arguments (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pzeta(const Packet& x, const Packet& q) { using numext::zeta; return zeta(x, q); } /** \internal \returns the polygamma function (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ppolygamma(const Packet& n, const Packet& x) { using numext::polygamma; return polygamma(n, x); } /** \internal \returns the erf(\a a) (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet perf(const Packet& a) { using numext::erf; return erf(a); } /** \internal \returns the erfc(\a a) (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet perfc(const Packet& a) { using numext::erfc; return erfc(a); } /** \internal \returns the incomplete gamma function igamma(\a a, \a x) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pigamma(const Packet& a, const Packet& x) { using numext::igamma; return igamma(a, x); } /** \internal \returns the complementary incomplete gamma function igammac(\a a, \a x) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pigammac(const Packet& a, const Packet& x) { using numext::igammac; return igammac(a, x); } /** \internal \returns the complementary incomplete gamma function betainc(\a a, \a b, \a x) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pbetainc(const Packet& a, const Packet& b,const Packet& x) { using numext::betainc; return betainc(a, b, x); } } // end namespace internal } // end namespace Eigen #endif // EIGEN_SPECIALFUNCTIONS_PACKETMATH_H RcppEigen/inst/include/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h0000644000176200001440000000432513100733213030417 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
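// Hedged, illustrative sketch (not part of Eigen): the generic p* wrappers defined in
// SpecialFunctionsPacketMath.h above simply forward to the scalar numext routines, so for
// a trivial "packet" such as plain float the two spellings below are equivalent.
#if 0   // illustrative sketch, deliberately excluded from compilation
#include <unsupported/Eigen/SpecialFunctions>
#include <cassert>
int main() {
    float a = Eigen::internal::plgamma<float>(0.5f);  // packet-level wrapper
    float b = Eigen::numext::lgamma(0.5f);            // scalar routine it forwards to
    assert(a == b);
    return 0;
}
#endif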
#ifndef EIGEN_SPECIALFUNCTIONS_HALF_H
#define EIGEN_SPECIALFUNCTIONS_HALF_H

namespace Eigen {
namespace numext {

#if EIGEN_HAS_C99_MATH
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half lgamma(const Eigen::half& a) {
  return Eigen::half(Eigen::numext::lgamma(static_cast<float>(a)));
}
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half digamma(const Eigen::half& a) {
  return Eigen::half(Eigen::numext::digamma(static_cast<float>(a)));
}
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half zeta(const Eigen::half& x, const Eigen::half& q) {
  return Eigen::half(Eigen::numext::zeta(static_cast<float>(x), static_cast<float>(q)));
}
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half polygamma(const Eigen::half& n, const Eigen::half& x) {
  return Eigen::half(Eigen::numext::polygamma(static_cast<float>(n), static_cast<float>(x)));
}
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half erf(const Eigen::half& a) {
  return Eigen::half(Eigen::numext::erf(static_cast<float>(a)));
}
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half erfc(const Eigen::half& a) {
  return Eigen::half(Eigen::numext::erfc(static_cast<float>(a)));
}
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igamma(const Eigen::half& a, const Eigen::half& x) {
  return Eigen::half(Eigen::numext::igamma(static_cast<float>(a), static_cast<float>(x)));
}
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igammac(const Eigen::half& a, const Eigen::half& x) {
  return Eigen::half(Eigen::numext::igammac(static_cast<float>(a), static_cast<float>(x)));
}
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half betainc(const Eigen::half& a, const Eigen::half& b, const Eigen::half& x) {
  return Eigen::half(Eigen::numext::betainc(static_cast<float>(a), static_cast<float>(b), static_cast<float>(x)));
}
#endif

} // end namespace numext
} // end namespace Eigen

#endif // EIGEN_SPECIALFUNCTIONS_HALF_H
RcppEigen/inst/include/unsupported/Eigen/src/SpecialFunctions/arch/0000755000176200001440000000000013100733213025113 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/SpecialFunctions/arch/CUDA/0000755000176200001440000000000013100733213025637 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h0000644000176200001440000001066013100733213032051 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CUDA_SPECIALFUNCTIONS_H
#define EIGEN_CUDA_SPECIALFUNCTIONS_H

namespace Eigen {
namespace internal {

// Make sure this is only available when targeting a GPU: we don't want to
// introduce conflicts between these packet_traits definitions and the ones
// we'll use on the host side (SSE, AVX, ...)
#if defined(__CUDACC__) && defined(EIGEN_USE_GPU) template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 plgamma(const float4& a) { return make_float4(lgammaf(a.x), lgammaf(a.y), lgammaf(a.z), lgammaf(a.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 plgamma(const double2& a) { using numext::lgamma; return make_double2(lgamma(a.x), lgamma(a.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pdigamma(const float4& a) { using numext::digamma; return make_float4(digamma(a.x), digamma(a.y), digamma(a.z), digamma(a.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pdigamma(const double2& a) { using numext::digamma; return make_double2(digamma(a.x), digamma(a.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pzeta(const float4& x, const float4& q) { using numext::zeta; return make_float4(zeta(x.x, q.x), zeta(x.y, q.y), zeta(x.z, q.z), zeta(x.w, q.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pzeta(const double2& x, const double2& q) { using numext::zeta; return make_double2(zeta(x.x, q.x), zeta(x.y, q.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ppolygamma(const float4& n, const float4& x) { using numext::polygamma; return make_float4(polygamma(n.x, x.x), polygamma(n.y, x.y), polygamma(n.z, x.z), polygamma(n.w, x.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ppolygamma(const double2& n, const double2& x) { using numext::polygamma; return make_double2(polygamma(n.x, x.x), polygamma(n.y, x.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 perf(const float4& a) { return make_float4(erff(a.x), erff(a.y), erff(a.z), erff(a.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 perf(const double2& a) { using numext::erf; return make_double2(erf(a.x), erf(a.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 perfc(const float4& a) { using numext::erfc; return make_float4(erfc(a.x), erfc(a.y), erfc(a.z), erfc(a.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 perfc(const double2& a) { using numext::erfc; return make_double2(erfc(a.x), erfc(a.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pigamma(const float4& a, const float4& x) { using numext::igamma; return make_float4( igamma(a.x, x.x), igamma(a.y, x.y), igamma(a.z, x.z), igamma(a.w, x.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pigamma(const double2& a, const double2& x) { using numext::igamma; return make_double2(igamma(a.x, x.x), igamma(a.y, x.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pigammac(const float4& a, const float4& x) { using numext::igammac; return make_float4( igammac(a.x, x.x), igammac(a.y, x.y), igammac(a.z, x.z), igammac(a.w, x.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pigammac(const double2& a, const double2& x) { using numext::igammac; return make_double2(igammac(a.x, x.x), igammac(a.y, x.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbetainc(const float4& a, const float4& b, const float4& x) { using numext::betainc; return make_float4( betainc(a.x, b.x, x.x), betainc(a.y, b.y, x.y), betainc(a.z, b.z, x.z), betainc(a.w, b.w, x.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pbetainc(const double2& a, const double2& b, const double2& x) { using numext::betainc; return make_double2(betainc(a.x, b.x, x.x), betainc(a.y, b.y, x.y)); } #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_CUDA_SPECIALFUNCTIONS_H 
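// Hedged, illustrative sketch (not part of Eigen; CUDA only): the float4/double2
// specializations above evaluate the scalar special functions lane by lane, i.e.
// plgamma<float4>(a) computes the same values as the hand-written device helper below.
#if 0   // illustrative sketch, deliberately excluded from compilation
__device__ float4 lgamma_lanewise(const float4& a) {
    // one lgammaf call per lane, exactly as in plgamma<float4> above
    return make_float4(lgammaf(a.x), lgammaf(a.y), lgammaf(a.z), lgammaf(a.w));
}
#endif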
RcppEigen/inst/include/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h0000644000176200001440000012305313100733213030446 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Eugene Brevdo // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPECIAL_FUNCTIONS_H #define EIGEN_SPECIAL_FUNCTIONS_H namespace Eigen { namespace internal { // Parts of this code are based on the Cephes Math Library. // // Cephes Math Library Release 2.8: June, 2000 // Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier // // Permission has been kindly provided by the original author // to incorporate the Cephes software into the Eigen codebase: // // From: Stephen Moshier // To: Eugene Brevdo // Subject: Re: Permission to wrap several cephes functions in Eigen // // Hello Eugene, // // Thank you for writing. // // If your licensing is similar to BSD, the formal way that has been // handled is simply to add a statement to the effect that you are incorporating // the Cephes software by permission of the author. // // Good luck with your project, // Steve namespace cephes { /* polevl (modified for Eigen) * * Evaluate polynomial * * * * SYNOPSIS: * * int N; * Scalar x, y, coef[N+1]; * * y = polevl( x, coef); * * * * DESCRIPTION: * * Evaluates polynomial of degree N: * * 2 N * y = C + C x + C x +...+ C x * 0 1 2 N * * Coefficients are stored in reverse order: * * coef[0] = C , ..., coef[N] = C . * N 0 * * The function p1evl() assumes that coef[N] = 1.0 and is * omitted from the array. Its calling arguments are * otherwise the same as polevl(). * * * The Eigen implementation is templatized. For best speed, store * coef as a const array (constexpr), e.g. 
* * const double coef[] = {1.0, 2.0, 3.0, ...}; * */ template struct polevl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar x, const Scalar coef[]) { EIGEN_STATIC_ASSERT((N > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); return polevl::run(x, coef) * x + coef[N]; } }; template struct polevl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar, const Scalar coef[]) { return coef[0]; } }; } // end namespace cephes /**************************************************************************** * Implementation of lgamma, requires C++11/C99 * ****************************************************************************/ template struct lgamma_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar) { EIGEN_STATIC_ASSERT((internal::is_same::value == false), THIS_TYPE_IS_NOT_SUPPORTED); return Scalar(0); } }; template struct lgamma_retval { typedef Scalar type; }; #if EIGEN_HAS_C99_MATH template <> struct lgamma_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE float run(float x) { #if !defined(__CUDA_ARCH__) && (defined(_BSD_SOURCE) || defined(_SVID_SOURCE)) && !defined(__APPLE__) int signgam; return ::lgammaf_r(x, &signgam); #else return ::lgammaf(x); #endif } }; template <> struct lgamma_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE double run(double x) { #if !defined(__CUDA_ARCH__) && (defined(_BSD_SOURCE) || defined(_SVID_SOURCE)) && !defined(__APPLE__) int signgam; return ::lgamma_r(x, &signgam); #else return ::lgamma(x); #endif } }; #endif /**************************************************************************** * Implementation of digamma (psi), based on Cephes * ****************************************************************************/ template struct digamma_retval { typedef Scalar type; }; /* * * Polynomial evaluation helper for the Psi (digamma) function. * * digamma_impl_maybe_poly::run(s) evaluates the asymptotic Psi expansion for * input Scalar s, assuming s is above 10.0. * * If s is above a certain threshold for the given Scalar type, zero * is returned. Otherwise the polynomial is evaluated with enough * coefficients for results matching Scalar machine precision. * * */ template struct digamma_impl_maybe_poly { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar) { EIGEN_STATIC_ASSERT((internal::is_same::value == false), THIS_TYPE_IS_NOT_SUPPORTED); return Scalar(0); } }; template <> struct digamma_impl_maybe_poly { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE float run(const float s) { const float A[] = { -4.16666666666666666667E-3f, 3.96825396825396825397E-3f, -8.33333333333333333333E-3f, 8.33333333333333333333E-2f }; float z; if (s < 1.0e8f) { z = 1.0f / (s * s); return z * cephes::polevl::run(z, A); } else return 0.0f; } }; template <> struct digamma_impl_maybe_poly { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE double run(const double s) { const double A[] = { 8.33333333333333333333E-2, -2.10927960927960927961E-2, 7.57575757575757575758E-3, -4.16666666666666666667E-3, 3.96825396825396825397E-3, -8.33333333333333333333E-3, 8.33333333333333333333E-2 }; double z; if (s < 1.0e17) { z = 1.0 / (s * s); return z * cephes::polevl::run(z, A); } else return 0.0; } }; template struct digamma_impl { EIGEN_DEVICE_FUNC static Scalar run(Scalar x) { /* * * Psi (digamma) function (modified for Eigen) * * * SYNOPSIS: * * double x, y, psi(); * * y = psi( x ); * * * DESCRIPTION: * * d - * psi(x) = -- ln | (x) * dx * * is the logarithmic derivative of the gamma function. 
* For integer x, * n-1 * - * psi(n) = -EUL + > 1/k. * - * k=1 * * If x is negative, it is transformed to a positive argument by the * reflection formula psi(1-x) = psi(x) + pi cot(pi x). * For general positive x, the argument is made greater than 10 * using the recurrence psi(x+1) = psi(x) + 1/x. * Then the following asymptotic expansion is applied: * * inf. B * - 2k * psi(x) = log(x) - 1/2x - > ------- * - 2k * k=1 2k x * * where the B2k are Bernoulli numbers. * * ACCURACY (float): * Relative error (except absolute when |psi| < 1): * arithmetic domain # trials peak rms * IEEE 0,30 30000 1.3e-15 1.4e-16 * IEEE -30,0 40000 1.5e-15 2.2e-16 * * ACCURACY (double): * Absolute error, relative when |psi| > 1 : * arithmetic domain # trials peak rms * IEEE -33,0 30000 8.2e-7 1.2e-7 * IEEE 0,33 100000 7.3e-7 7.7e-8 * * ERROR MESSAGES: * message condition value returned * psi singularity x integer <=0 INFINITY */ Scalar p, q, nz, s, w, y; bool negative = false; const Scalar maxnum = NumTraits::infinity(); const Scalar m_pi = Scalar(EIGEN_PI); const Scalar zero = Scalar(0); const Scalar one = Scalar(1); const Scalar half = Scalar(0.5); nz = zero; if (x <= zero) { negative = true; q = x; p = numext::floor(q); if (p == q) { return maxnum; } /* Remove the zeros of tan(m_pi x) * by subtracting the nearest integer from x */ nz = q - p; if (nz != half) { if (nz > half) { p += one; nz = q - p; } nz = m_pi / numext::tan(m_pi * nz); } else { nz = zero; } x = one - x; } /* use the recurrence psi(x+1) = psi(x) + 1/x. */ s = x; w = zero; while (s < Scalar(10)) { w += one / s; s += one; } y = digamma_impl_maybe_poly::run(s); y = numext::log(s) - (half / s) - y - w; return (negative) ? y - nz : y; } }; /**************************************************************************** * Implementation of erf, requires C++11/C99 * ****************************************************************************/ template struct erf_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar) { EIGEN_STATIC_ASSERT((internal::is_same::value == false), THIS_TYPE_IS_NOT_SUPPORTED); return Scalar(0); } }; template struct erf_retval { typedef Scalar type; }; #if EIGEN_HAS_C99_MATH template <> struct erf_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE float run(float x) { return ::erff(x); } }; template <> struct erf_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE double run(double x) { return ::erf(x); } }; #endif // EIGEN_HAS_C99_MATH /*************************************************************************** * Implementation of erfc, requires C++11/C99 * ****************************************************************************/ template struct erfc_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar) { EIGEN_STATIC_ASSERT((internal::is_same::value == false), THIS_TYPE_IS_NOT_SUPPORTED); return Scalar(0); } }; template struct erfc_retval { typedef Scalar type; }; #if EIGEN_HAS_C99_MATH template <> struct erfc_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE float run(const float x) { return ::erfcf(x); } }; template <> struct erfc_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE double run(const double x) { return ::erfc(x); } }; #endif // EIGEN_HAS_C99_MATH /************************************************************************************************************** * Implementation of igammac (complemented incomplete gamma integral), based on Cephes but requires C++11/C99 * 
**************************************************************************************************************/ template struct igammac_retval { typedef Scalar type; }; // NOTE: cephes_helper is also used to implement zeta template struct cephes_helper { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar machep() { assert(false && "machep not supported for this type"); return 0.0; } EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar big() { assert(false && "big not supported for this type"); return 0.0; } EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar biginv() { assert(false && "biginv not supported for this type"); return 0.0; } }; template <> struct cephes_helper { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE float machep() { return NumTraits::epsilon() / 2; // 1.0 - machep == 1.0 } EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE float big() { // use epsneg (1.0 - epsneg == 1.0) return 1.0f / (NumTraits::epsilon() / 2); } EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE float biginv() { // epsneg return machep(); } }; template <> struct cephes_helper { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE double machep() { return NumTraits::epsilon() / 2; // 1.0 - machep == 1.0 } EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE double big() { return 1.0 / NumTraits::epsilon(); } EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE double biginv() { // inverse of eps return NumTraits::epsilon(); } }; #if !EIGEN_HAS_C99_MATH template struct igammac_impl { EIGEN_DEVICE_FUNC static Scalar run(Scalar a, Scalar x) { EIGEN_STATIC_ASSERT((internal::is_same::value == false), THIS_TYPE_IS_NOT_SUPPORTED); return Scalar(0); } }; #else template struct igamma_impl; // predeclare igamma_impl template struct igammac_impl { EIGEN_DEVICE_FUNC static Scalar run(Scalar a, Scalar x) { /* igamc() * * Incomplete gamma integral (modified for Eigen) * * * * SYNOPSIS: * * double a, x, y, igamc(); * * y = igamc( a, x ); * * DESCRIPTION: * * The function is defined by * * * igamc(a,x) = 1 - igam(a,x) * * inf. * - * 1 | | -t a-1 * = ----- | e t dt. * - | | * | (a) - * x * * * In this implementation both arguments must be positive. * The integral is evaluated by either a power series or * continued fraction expansion, depending on the relative * values of a and x. * * ACCURACY (float): * * Relative error: * arithmetic domain # trials peak rms * IEEE 0,30 30000 7.8e-6 5.9e-7 * * * ACCURACY (double): * * Tested at random a, x. * a x Relative error: * arithmetic domain domain # trials peak rms * IEEE 0.5,100 0,100 200000 1.9e-14 1.7e-15 * IEEE 0.01,0.5 0,100 200000 1.4e-13 1.6e-15 * */ /* Cephes Math Library Release 2.2: June, 1992 Copyright 1985, 1987, 1992 by Stephen L. Moshier Direct inquiries to 30 Frost Street, Cambridge, MA 02140 */ const Scalar zero = 0; const Scalar one = 1; const Scalar nan = NumTraits::quiet_NaN(); if ((x < zero) || (a <= zero)) { // domain error return nan; } if ((x < one) || (x < a)) { /* The checks above ensure that we meet the preconditions for * igamma_impl::Impl(), so call it, rather than igamma_impl::Run(). * Calling Run() would also work, but in that case the compiler may not be * able to prove that igammac_impl::Run and igamma_impl::Run are not * mutually recursive. This leads to worse code, particularly on * platforms like nvptx, where recursion is allowed only begrudgingly. */ return (one - igamma_impl::Impl(a, x)); } return Impl(a, x); } private: /* igamma_impl calls igammac_impl::Impl. */ friend struct igamma_impl; /* Actually computes igamc(a, x). 
* * Preconditions: * a > 0 * x >= 1 * x >= a */ EIGEN_DEVICE_FUNC static Scalar Impl(Scalar a, Scalar x) { const Scalar zero = 0; const Scalar one = 1; const Scalar two = 2; const Scalar machep = cephes_helper::machep(); const Scalar maxlog = numext::log(NumTraits::highest()); const Scalar big = cephes_helper::big(); const Scalar biginv = cephes_helper::biginv(); const Scalar inf = NumTraits::infinity(); Scalar ans, ax, c, yc, r, t, y, z; Scalar pk, pkm1, pkm2, qk, qkm1, qkm2; if (x == inf) return zero; // std::isinf crashes on CUDA /* Compute x**a * exp(-x) / gamma(a) */ ax = a * numext::log(x) - x - lgamma_impl::run(a); if (ax < -maxlog) { // underflow return zero; } ax = numext::exp(ax); // continued fraction y = one - a; z = x + y + one; c = zero; pkm2 = one; qkm2 = x; pkm1 = x + one; qkm1 = z * x; ans = pkm1 / qkm1; while (true) { c += one; y += one; z += two; yc = y * c; pk = pkm1 * z - pkm2 * yc; qk = qkm1 * z - qkm2 * yc; if (qk != zero) { r = pk / qk; t = numext::abs((ans - r) / r); ans = r; } else { t = one; } pkm2 = pkm1; pkm1 = pk; qkm2 = qkm1; qkm1 = qk; if (numext::abs(pk) > big) { pkm2 *= biginv; pkm1 *= biginv; qkm2 *= biginv; qkm1 *= biginv; } if (t <= machep) { break; } } return (ans * ax); } }; #endif // EIGEN_HAS_C99_MATH /************************************************************************************************ * Implementation of igamma (incomplete gamma integral), based on Cephes but requires C++11/C99 * ************************************************************************************************/ template struct igamma_retval { typedef Scalar type; }; #if !EIGEN_HAS_C99_MATH template struct igamma_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar x) { EIGEN_STATIC_ASSERT((internal::is_same::value == false), THIS_TYPE_IS_NOT_SUPPORTED); return Scalar(0); } }; #else template struct igamma_impl { EIGEN_DEVICE_FUNC static Scalar run(Scalar a, Scalar x) { /* igam() * Incomplete gamma integral * * * * SYNOPSIS: * * double a, x, y, igam(); * * y = igam( a, x ); * * DESCRIPTION: * * The function is defined by * * x * - * 1 | | -t a-1 * igam(a,x) = ----- | e t dt. * - | | * | (a) - * 0 * * * In this implementation both arguments must be positive. * The integral is evaluated by either a power series or * continued fraction expansion, depending on the relative * values of a and x. * * ACCURACY (double): * * Relative error: * arithmetic domain # trials peak rms * IEEE 0,30 200000 3.6e-14 2.9e-15 * IEEE 0,100 300000 9.9e-14 1.5e-14 * * * ACCURACY (float): * * Relative error: * arithmetic domain # trials peak rms * IEEE 0,30 20000 7.8e-6 5.9e-7 * */ /* Cephes Math Library Release 2.2: June, 1992 Copyright 1985, 1987, 1992 by Stephen L. Moshier Direct inquiries to 30 Frost Street, Cambridge, MA 02140 */ /* left tail of incomplete gamma function: * * inf. k * a -x - x * x e > ---------- * - - * k=0 | (a+k+1) * */ const Scalar zero = 0; const Scalar one = 1; const Scalar nan = NumTraits::quiet_NaN(); if (x == zero) return zero; if ((x < zero) || (a <= zero)) { // domain error return nan; } if ((x > one) && (x > a)) { /* The checks above ensure that we meet the preconditions for * igammac_impl::Impl(), so call it, rather than igammac_impl::Run(). * Calling Run() would also work, but in that case the compiler may not be * able to prove that igammac_impl::Run and igamma_impl::Run are not * mutually recursive. This leads to worse code, particularly on * platforms like nvptx, where recursion is allowed only begrudgingly. 
*/ return (one - igammac_impl::Impl(a, x)); } return Impl(a, x); } private: /* igammac_impl calls igamma_impl::Impl. */ friend struct igammac_impl; /* Actually computes igam(a, x). * * Preconditions: * x > 0 * a > 0 * !(x > 1 && x > a) */ EIGEN_DEVICE_FUNC static Scalar Impl(Scalar a, Scalar x) { const Scalar zero = 0; const Scalar one = 1; const Scalar machep = cephes_helper::machep(); const Scalar maxlog = numext::log(NumTraits::highest()); Scalar ans, ax, c, r; /* Compute x**a * exp(-x) / gamma(a) */ ax = a * numext::log(x) - x - lgamma_impl::run(a); if (ax < -maxlog) { // underflow return zero; } ax = numext::exp(ax); /* power series */ r = a; c = one; ans = one; while (true) { r += one; c *= x/r; ans += c; if (c/ans <= machep) { break; } } return (ans * ax / a); } }; #endif // EIGEN_HAS_C99_MATH /***************************************************************************** * Implementation of Riemann zeta function of two arguments, based on Cephes * *****************************************************************************/ template struct zeta_retval { typedef Scalar type; }; template struct zeta_impl_series { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar) { EIGEN_STATIC_ASSERT((internal::is_same::value == false), THIS_TYPE_IS_NOT_SUPPORTED); return Scalar(0); } }; template <> struct zeta_impl_series { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE bool run(float& a, float& b, float& s, const float x, const float machep) { int i = 0; while(i < 9) { i += 1; a += 1.0f; b = numext::pow( a, -x ); s += b; if( numext::abs(b/s) < machep ) return true; } //Return whether we are done return false; } }; template <> struct zeta_impl_series { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE bool run(double& a, double& b, double& s, const double x, const double machep) { int i = 0; while( (i < 9) || (a <= 9.0) ) { i += 1; a += 1.0; b = numext::pow( a, -x ); s += b; if( numext::abs(b/s) < machep ) return true; } //Return whether we are done return false; } }; template struct zeta_impl { EIGEN_DEVICE_FUNC static Scalar run(Scalar x, Scalar q) { /* zeta.c * * Riemann zeta function of two arguments * * * * SYNOPSIS: * * double x, q, y, zeta(); * * y = zeta( x, q ); * * * * DESCRIPTION: * * * * inf. * - -x * zeta(x,q) = > (k+q) * - * k=0 * * where x > 1 and q is not a negative integer or zero. * The Euler-Maclaurin summation formula is used to obtain * the expansion * * n * - -x * zeta(x,q) = > (k+q) * - * k=1 * * 1-x inf. B x(x+1)...(x+2j) * (n+q) 1 - 2j * + --------- - ------- + > -------------------- * x-1 x - x+2j+1 * 2(n+q) j=1 (2j)! (n+q) * * where the B2j are Bernoulli numbers. Note that (see zetac.c) * zeta(x,1) = zetac(x) + 1. * * * * ACCURACY: * * Relative error for single precision: * arithmetic domain # trials peak rms * IEEE 0,25 10000 6.9e-7 1.0e-7 * * Large arguments may produce underflow in powf(), in which * case the results are inaccurate. * * REFERENCE: * * Gradshteyn, I. S., and I. M. Ryzhik, Tables of Integrals, * Series, and Products, p. 1073; Academic Press, 1980. 
* */ int i; Scalar p, r, a, b, k, s, t, w; const Scalar A[] = { Scalar(12.0), Scalar(-720.0), Scalar(30240.0), Scalar(-1209600.0), Scalar(47900160.0), Scalar(-1.8924375803183791606e9), /*1.307674368e12/691*/ Scalar(7.47242496e10), Scalar(-2.950130727918164224e12), /*1.067062284288e16/3617*/ Scalar(1.1646782814350067249e14), /*5.109094217170944e18/43867*/ Scalar(-4.5979787224074726105e15), /*8.028576626982912e20/174611*/ Scalar(1.8152105401943546773e17), /*1.5511210043330985984e23/854513*/ Scalar(-7.1661652561756670113e18) /*1.6938241367317436694528e27/236364091*/ }; const Scalar maxnum = NumTraits::infinity(); const Scalar zero = 0.0, half = 0.5, one = 1.0; const Scalar machep = cephes_helper::machep(); const Scalar nan = NumTraits::quiet_NaN(); if( x == one ) return maxnum; if( x < one ) { return nan; } if( q <= zero ) { if(q == numext::floor(q)) { return maxnum; } p = x; r = numext::floor(p); if (p != r) return nan; } /* Permit negative q but continue sum until n+q > +9 . * This case should be handled by a reflection formula. * If q<0 and x is an integer, there is a relation to * the polygamma function. */ s = numext::pow( q, -x ); a = q; b = zero; // Run the summation in a helper function that is specific to the floating precision if (zeta_impl_series::run(a, b, s, x, machep)) { return s; } w = a; s += b*w/(x-one); s -= half * b; a = one; k = zero; for( i=0; i<12; i++ ) { a *= x + k; b /= w; t = a*b/A[i]; s = s + t; t = numext::abs(t/s); if( t < machep ) { break; } k += one; a *= x + k; b /= w; k += one; } return s; } }; /**************************************************************************** * Implementation of polygamma function, requires C++11/C99 * ****************************************************************************/ template struct polygamma_retval { typedef Scalar type; }; #if !EIGEN_HAS_C99_MATH template struct polygamma_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(Scalar n, Scalar x) { EIGEN_STATIC_ASSERT((internal::is_same::value == false), THIS_TYPE_IS_NOT_SUPPORTED); return Scalar(0); } }; #else template struct polygamma_impl { EIGEN_DEVICE_FUNC static Scalar run(Scalar n, Scalar x) { Scalar zero = 0.0, one = 1.0; Scalar nplus = n + one; const Scalar nan = NumTraits::quiet_NaN(); // Check that n is an integer if (numext::floor(n) != n) { return nan; } // Just return the digamma function for n = 1 else if (n == zero) { return digamma_impl::run(x); } // Use the same implementation as scipy else { Scalar factorial = numext::exp(lgamma_impl::run(nplus)); return numext::pow(-one, nplus) * factorial * zeta_impl::run(nplus, x); } } }; #endif // EIGEN_HAS_C99_MATH /************************************************************************************************ * Implementation of betainc (incomplete beta integral), based on Cephes but requires C++11/C99 * ************************************************************************************************/ template struct betainc_retval { typedef Scalar type; }; #if !EIGEN_HAS_C99_MATH template struct betainc_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar b, Scalar x) { EIGEN_STATIC_ASSERT((internal::is_same::value == false), THIS_TYPE_IS_NOT_SUPPORTED); return Scalar(0); } }; #else template struct betainc_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(Scalar, Scalar, Scalar) { /* betaincf.c * * Incomplete beta integral * * * SYNOPSIS: * * float a, b, x, y, betaincf(); * * y = betaincf( a, b, x ); * * * DESCRIPTION: * * Returns incomplete beta integral of the 
arguments, evaluated * from zero to x. The function is defined as * * x * - - * | (a+b) | | a-1 b-1 * ----------- | t (1-t) dt. * - - | | * | (a) | (b) - * 0 * * The domain of definition is 0 <= x <= 1. In this * implementation a and b are restricted to positive values. * The integral from x to 1 may be obtained by the symmetry * relation * * 1 - betainc( a, b, x ) = betainc( b, a, 1-x ). * * The integral is evaluated by a continued fraction expansion. * If a < 1, the function calls itself recursively after a * transformation to increase a to a+1. * * ACCURACY (float): * * Tested at random points (a,b,x) with a and b in the indicated * interval and x between 0 and 1. * * arithmetic domain # trials peak rms * Relative error: * IEEE 0,30 10000 3.7e-5 5.1e-6 * IEEE 0,100 10000 1.7e-4 2.5e-5 * The useful domain for relative error is limited by underflow * of the single precision exponential function. * Absolute error: * IEEE 0,30 100000 2.2e-5 9.6e-7 * IEEE 0,100 10000 6.5e-5 3.7e-6 * * Larger errors may occur for extreme ratios of a and b. * * ACCURACY (double): * arithmetic domain # trials peak rms * IEEE 0,5 10000 6.9e-15 4.5e-16 * IEEE 0,85 250000 2.2e-13 1.7e-14 * IEEE 0,1000 30000 5.3e-12 6.3e-13 * IEEE 0,10000 250000 9.3e-11 7.1e-12 * IEEE 0,100000 10000 8.7e-10 4.8e-11 * Outputs smaller than the IEEE gradual underflow threshold * were excluded from these statistics. * * ERROR MESSAGES: * message condition value returned * incbet domain x<0, x>1 nan * incbet underflow nan */ EIGEN_STATIC_ASSERT((internal::is_same::value == false), THIS_TYPE_IS_NOT_SUPPORTED); return Scalar(0); } }; /* Continued fraction expansion #1 for incomplete beta integral (small_branch = True) * Continued fraction expansion #2 for incomplete beta integral (small_branch = False) */ template struct incbeta_cfe { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar b, Scalar x, bool small_branch) { EIGEN_STATIC_ASSERT((internal::is_same::value || internal::is_same::value), THIS_TYPE_IS_NOT_SUPPORTED); const Scalar big = cephes_helper::big(); const Scalar machep = cephes_helper::machep(); const Scalar biginv = cephes_helper::biginv(); const Scalar zero = 0; const Scalar one = 1; const Scalar two = 2; Scalar xk, pk, pkm1, pkm2, qk, qkm1, qkm2; Scalar k1, k2, k3, k4, k5, k6, k7, k8, k26update; Scalar ans; int n; const int num_iters = (internal::is_same::value) ? 100 : 300; const Scalar thresh = (internal::is_same::value) ? machep : Scalar(3) * machep; Scalar r = (internal::is_same::value) ? 
zero : one; if (small_branch) { k1 = a; k2 = a + b; k3 = a; k4 = a + one; k5 = one; k6 = b - one; k7 = k4; k8 = a + two; k26update = one; } else { k1 = a; k2 = b - one; k3 = a; k4 = a + one; k5 = one; k6 = a + b; k7 = a + one; k8 = a + two; k26update = -one; x = x / (one - x); } pkm2 = zero; qkm2 = one; pkm1 = one; qkm1 = one; ans = one; n = 0; do { xk = -(x * k1 * k2) / (k3 * k4); pk = pkm1 + pkm2 * xk; qk = qkm1 + qkm2 * xk; pkm2 = pkm1; pkm1 = pk; qkm2 = qkm1; qkm1 = qk; xk = (x * k5 * k6) / (k7 * k8); pk = pkm1 + pkm2 * xk; qk = qkm1 + qkm2 * xk; pkm2 = pkm1; pkm1 = pk; qkm2 = qkm1; qkm1 = qk; if (qk != zero) { r = pk / qk; if (numext::abs(ans - r) < numext::abs(r) * thresh) { return r; } ans = r; } k1 += one; k2 += k26update; k3 += two; k4 += two; k5 += one; k6 -= k26update; k7 += two; k8 += two; if ((numext::abs(qk) + numext::abs(pk)) > big) { pkm2 *= biginv; pkm1 *= biginv; qkm2 *= biginv; qkm1 *= biginv; } if ((numext::abs(qk) < biginv) || (numext::abs(pk) < biginv)) { pkm2 *= big; pkm1 *= big; qkm2 *= big; qkm1 *= big; } } while (++n < num_iters); return ans; } }; /* Helper functions depending on the Scalar type */ template struct betainc_helper {}; template <> struct betainc_helper { /* Core implementation, assumes a large (> 1.0) */ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE float incbsa(float aa, float bb, float xx) { float ans, a, b, t, x, onemx; bool reversed_a_b = false; onemx = 1.0f - xx; /* see if x is greater than the mean */ if (xx > (aa / (aa + bb))) { reversed_a_b = true; a = bb; b = aa; t = xx; x = onemx; } else { a = aa; b = bb; t = onemx; x = xx; } /* Choose expansion for optimal convergence */ if (b > 10.0f) { if (numext::abs(b * x / a) < 0.3f) { t = betainc_helper::incbps(a, b, x); if (reversed_a_b) t = 1.0f - t; return t; } } ans = x * (a + b - 2.0f) / (a - 1.0f); if (ans < 1.0f) { ans = incbeta_cfe::run(a, b, x, true /* small_branch */); t = b * numext::log(t); } else { ans = incbeta_cfe::run(a, b, x, false /* small_branch */); t = (b - 1.0f) * numext::log(t); } t += a * numext::log(x) + lgamma_impl::run(a + b) - lgamma_impl::run(a) - lgamma_impl::run(b); t += numext::log(ans / a); t = numext::exp(t); if (reversed_a_b) t = 1.0f - t; return t; } EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE float incbps(float a, float b, float x) { float t, u, y, s; const float machep = cephes_helper::machep(); y = a * numext::log(x) + (b - 1.0f) * numext::log1p(-x) - numext::log(a); y -= lgamma_impl::run(a) + lgamma_impl::run(b); y += lgamma_impl::run(a + b); t = x / (1.0f - x); s = 0.0f; u = 1.0f; do { b -= 1.0f; if (b == 0.0f) { break; } a += 1.0f; u *= t * b / a; s += u; } while (numext::abs(u) > machep); return numext::exp(y) * (1.0f + s); } }; template <> struct betainc_impl { EIGEN_DEVICE_FUNC static float run(float a, float b, float x) { const float nan = NumTraits::quiet_NaN(); float ans, t; if (a <= 0.0f) return nan; if (b <= 0.0f) return nan; if ((x <= 0.0f) || (x >= 1.0f)) { if (x == 0.0f) return 0.0f; if (x == 1.0f) return 1.0f; // mtherr("betaincf", DOMAIN); return nan; } /* transformation for small aa */ if (a <= 1.0f) { ans = betainc_helper::incbsa(a + 1.0f, b, x); t = a * numext::log(x) + b * numext::log1p(-x) + lgamma_impl::run(a + b) - lgamma_impl::run(a + 1.0f) - lgamma_impl::run(b); return (ans + numext::exp(t)); } else { return betainc_helper::incbsa(a, b, x); } } }; template <> struct betainc_helper { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE double incbps(double a, double b, double x) { const double machep = cephes_helper::machep(); double s, t, u, v, 
n, t1, z, ai; ai = 1.0 / a; u = (1.0 - b) * x; v = u / (a + 1.0); t1 = v; t = u; n = 2.0; s = 0.0; z = machep * ai; while (numext::abs(v) > z) { u = (n - b) * x / n; t *= u; v = t / (a + n); s += v; n += 1.0; } s += t1; s += ai; u = a * numext::log(x); // TODO: gamma() is not directly implemented in Eigen. /* if ((a + b) < maxgam && numext::abs(u) < maxlog) { t = gamma(a + b) / (gamma(a) * gamma(b)); s = s * t * pow(x, a); } else { */ t = lgamma_impl::run(a + b) - lgamma_impl::run(a) - lgamma_impl::run(b) + u + numext::log(s); return s = numext::exp(t); } }; template <> struct betainc_impl { EIGEN_DEVICE_FUNC static double run(double aa, double bb, double xx) { const double nan = NumTraits::quiet_NaN(); const double machep = cephes_helper::machep(); // const double maxgam = 171.624376956302725; double a, b, t, x, xc, w, y; bool reversed_a_b = false; if (aa <= 0.0 || bb <= 0.0) { return nan; // goto domerr; } if ((xx <= 0.0) || (xx >= 1.0)) { if (xx == 0.0) return (0.0); if (xx == 1.0) return (1.0); // mtherr("incbet", DOMAIN); return nan; } if ((bb * xx) <= 1.0 && xx <= 0.95) { return betainc_helper::incbps(aa, bb, xx); } w = 1.0 - xx; /* Reverse a and b if x is greater than the mean. */ if (xx > (aa / (aa + bb))) { reversed_a_b = true; a = bb; b = aa; xc = xx; x = w; } else { a = aa; b = bb; xc = w; x = xx; } if (reversed_a_b && (b * x) <= 1.0 && x <= 0.95) { t = betainc_helper::incbps(a, b, x); if (t <= machep) { t = 1.0 - machep; } else { t = 1.0 - t; } return t; } /* Choose expansion for better convergence. */ y = x * (a + b - 2.0) - (a - 1.0); if (y < 0.0) { w = incbeta_cfe::run(a, b, x, true /* small_branch */); } else { w = incbeta_cfe::run(a, b, x, false /* small_branch */) / xc; } /* Multiply w by the factor a b _ _ _ x (1-x) | (a+b) / ( a | (a) | (b) ) . */ y = a * numext::log(x); t = b * numext::log(xc); // TODO: gamma is not directly implemented in Eigen. /* if ((a + b) < maxgam && numext::abs(y) < maxlog && numext::abs(t) < maxlog) { t = pow(xc, b); t *= pow(x, a); t /= a; t *= w; t *= gamma(a + b) / (gamma(a) * gamma(b)); } else { */ /* Resort to logarithms. 
*/ y += t + lgamma_impl::run(a + b) - lgamma_impl::run(a) - lgamma_impl::run(b); y += numext::log(w / a); t = numext::exp(y); /* } */ // done: if (reversed_a_b) { if (t <= machep) { t = 1.0 - machep; } else { t = 1.0 - t; } } return t; } }; #endif // EIGEN_HAS_C99_MATH } // end namespace internal namespace numext { template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(lgamma, Scalar) lgamma(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(lgamma, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(digamma, Scalar) digamma(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(digamma, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(zeta, Scalar) zeta(const Scalar& x, const Scalar& q) { return EIGEN_MATHFUNC_IMPL(zeta, Scalar)::run(x, q); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(polygamma, Scalar) polygamma(const Scalar& n, const Scalar& x) { return EIGEN_MATHFUNC_IMPL(polygamma, Scalar)::run(n, x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(erf, Scalar) erf(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(erf, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(erfc, Scalar) erfc(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(erfc, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(igamma, Scalar) igamma(const Scalar& a, const Scalar& x) { return EIGEN_MATHFUNC_IMPL(igamma, Scalar)::run(a, x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(igammac, Scalar) igammac(const Scalar& a, const Scalar& x) { return EIGEN_MATHFUNC_IMPL(igammac, Scalar)::run(a, x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(betainc, Scalar) betainc(const Scalar& a, const Scalar& b, const Scalar& x) { return EIGEN_MATHFUNC_IMPL(betainc, Scalar)::run(a, b, x); } } // end namespace numext } // end namespace Eigen #endif // EIGEN_SPECIAL_FUNCTIONS_H RcppEigen/inst/include/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h0000644000176200001440000001250113100733213031150 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H #define EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H namespace Eigen { /** \cpp11 \returns an expression of the coefficient-wise igamma(\a a, \a x) to the given arrays. * * This function computes the coefficient-wise incomplete gamma function. * * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, * or float/double in non c++11 mode, the user has to provide implementations of igammac(T,T) for any scalar * type T to be supported. * * \sa Eigen::igammac(), Eigen::lgamma() */ template inline const Eigen::CwiseBinaryOp, const Derived, const ExponentDerived> igamma(const Eigen::ArrayBase& a, const Eigen::ArrayBase& x) { return Eigen::CwiseBinaryOp, const Derived, const ExponentDerived>( a.derived(), x.derived() ); } /** \cpp11 \returns an expression of the coefficient-wise igammac(\a a, \a x) to the given arrays. * * This function computes the coefficient-wise complementary incomplete gamma function. * * \note This function supports only float and double scalar types in c++11 mode. 
To support other scalar types, * or float/double in non c++11 mode, the user has to provide implementations of igammac(T,T) for any scalar * type T to be supported. * * \sa Eigen::igamma(), Eigen::lgamma() */ template inline const Eigen::CwiseBinaryOp, const Derived, const ExponentDerived> igammac(const Eigen::ArrayBase& a, const Eigen::ArrayBase& x) { return Eigen::CwiseBinaryOp, const Derived, const ExponentDerived>( a.derived(), x.derived() ); } /** \cpp11 \returns an expression of the coefficient-wise polygamma(\a n, \a x) to the given arrays. * * It returns the \a n -th derivative of the digamma(psi) evaluated at \c x. * * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, * or float/double in non c++11 mode, the user has to provide implementations of polygamma(T,T) for any scalar * type T to be supported. * * \sa Eigen::digamma() */ // * \warning Be careful with the order of the parameters: x.polygamma(n) is equivalent to polygamma(n,x) // * \sa ArrayBase::polygamma() template inline const Eigen::CwiseBinaryOp, const DerivedN, const DerivedX> polygamma(const Eigen::ArrayBase& n, const Eigen::ArrayBase& x) { return Eigen::CwiseBinaryOp, const DerivedN, const DerivedX>( n.derived(), x.derived() ); } /** \cpp11 \returns an expression of the coefficient-wise betainc(\a x, \a a, \a b) to the given arrays. * * This function computes the regularized incomplete beta function (integral). * * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, * or float/double in non c++11 mode, the user has to provide implementations of betainc(T,T,T) for any scalar * type T to be supported. * * \sa Eigen::betainc(), Eigen::lgamma() */ template inline const Eigen::CwiseTernaryOp, const ArgADerived, const ArgBDerived, const ArgXDerived> betainc(const Eigen::ArrayBase& a, const Eigen::ArrayBase& b, const Eigen::ArrayBase& x) { return Eigen::CwiseTernaryOp, const ArgADerived, const ArgBDerived, const ArgXDerived>( a.derived(), b.derived(), x.derived() ); } /** \returns an expression of the coefficient-wise zeta(\a x, \a q) to the given arrays. * * It returns the Riemann zeta function of two arguments \a x and \a q: * * \param x is the exposent, it must be > 1 * \param q is the shift, it must be > 0 * * \note This function supports only float and double scalar types. To support other scalar types, the user has * to provide implementations of zeta(T,T) for any scalar type T to be supported. * * \sa ArrayBase::zeta() */ template inline const Eigen::CwiseBinaryOp, const DerivedX, const DerivedQ> zeta(const Eigen::ArrayBase& x, const Eigen::ArrayBase& q) { return Eigen::CwiseBinaryOp, const DerivedX, const DerivedQ>( x.derived(), q.derived() ); } } // end namespace Eigen #endif // EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H RcppEigen/inst/include/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h0000644000176200001440000001734313100733213031354 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Eugene Brevdo // Copyright (C) 2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
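/** A minimal usage sketch of the coefficient-wise special-function array API
  * documented above; it assumes C++11 and double-precision arrays, and the
  * numeric values are purely illustrative.
  *
  * \code
  * #include <unsupported/Eigen/SpecialFunctions>
  * #include <iostream>
  *
  * int main()
  * {
  *   Eigen::ArrayXd a(3), b(3), x(3);
  *   a << 0.5, 2.0, 5.0;
  *   b << 2.0, 3.0, 1.5;
  *   x << 0.25, 0.5, 0.75;
  *
  *   // regularized incomplete beta function, evaluated coefficient-wise
  *   std::cout << Eigen::betainc(a, b, x) << "\n";
  *   // lower incomplete gamma function and its complement
  *   std::cout << Eigen::igamma(a, x) << "\n";
  *   std::cout << Eigen::igammac(a, x) << "\n";
  *   return 0;
  * }
  * \endcode
  */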
#ifndef EIGEN_SPECIALFUNCTIONS_FUNCTORS_H #define EIGEN_SPECIALFUNCTIONS_FUNCTORS_H namespace Eigen { namespace internal { /** \internal * \brief Template functor to compute the incomplete gamma function igamma(a, x) * * \sa class CwiseBinaryOp, Cwise::igamma */ template struct scalar_igamma_op : binary_op_base { EIGEN_EMPTY_STRUCT_CTOR(scalar_igamma_op) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& x) const { using numext::igamma; return igamma(a, x); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& x) const { return internal::pigamma(a, x); } }; template struct functor_traits > { enum { // Guesstimate Cost = 20 * NumTraits::MulCost + 10 * NumTraits::AddCost, PacketAccess = packet_traits::HasIGamma }; }; /** \internal * \brief Template functor to compute the complementary incomplete gamma function igammac(a, x) * * \sa class CwiseBinaryOp, Cwise::igammac */ template struct scalar_igammac_op : binary_op_base { EIGEN_EMPTY_STRUCT_CTOR(scalar_igammac_op) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& x) const { using numext::igammac; return igammac(a, x); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& x) const { return internal::pigammac(a, x); } }; template struct functor_traits > { enum { // Guesstimate Cost = 20 * NumTraits::MulCost + 10 * NumTraits::AddCost, PacketAccess = packet_traits::HasIGammac }; }; /** \internal * \brief Template functor to compute the incomplete beta integral betainc(a, b, x) * */ template struct scalar_betainc_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_betainc_op) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& x, const Scalar& a, const Scalar& b) const { using numext::betainc; return betainc(x, a, b); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& x, const Packet& a, const Packet& b) const { return internal::pbetainc(x, a, b); } }; template struct functor_traits > { enum { // Guesstimate Cost = 400 * NumTraits::MulCost + 400 * NumTraits::AddCost, PacketAccess = packet_traits::HasBetaInc }; }; /** \internal * \brief Template functor to compute the natural log of the absolute * value of Gamma of a scalar * \sa class CwiseUnaryOp, Cwise::lgamma() */ template struct scalar_lgamma_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_lgamma_op) EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using numext::lgamma; return lgamma(a); } typedef typename packet_traits::type Packet; EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plgamma(a); } }; template struct functor_traits > { enum { // Guesstimate Cost = 10 * NumTraits::MulCost + 5 * NumTraits::AddCost, PacketAccess = packet_traits::HasLGamma }; }; /** \internal * \brief Template functor to compute psi, the derivative of lgamma of a scalar. 
* \sa class CwiseUnaryOp, Cwise::digamma() */ template struct scalar_digamma_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_digamma_op) EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using numext::digamma; return digamma(a); } typedef typename packet_traits::type Packet; EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pdigamma(a); } }; template struct functor_traits > { enum { // Guesstimate Cost = 10 * NumTraits::MulCost + 5 * NumTraits::AddCost, PacketAccess = packet_traits::HasDiGamma }; }; /** \internal * \brief Template functor to compute the Riemann Zeta function of two arguments. * \sa class CwiseUnaryOp, Cwise::zeta() */ template struct scalar_zeta_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_zeta_op) EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& x, const Scalar& q) const { using numext::zeta; return zeta(x, q); } typedef typename packet_traits::type Packet; EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& x, const Packet& q) const { return internal::pzeta(x, q); } }; template struct functor_traits > { enum { // Guesstimate Cost = 10 * NumTraits::MulCost + 5 * NumTraits::AddCost, PacketAccess = packet_traits::HasZeta }; }; /** \internal * \brief Template functor to compute the polygamma function. * \sa class CwiseUnaryOp, Cwise::polygamma() */ template struct scalar_polygamma_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_polygamma_op) EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& n, const Scalar& x) const { using numext::polygamma; return polygamma(n, x); } typedef typename packet_traits::type Packet; EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& n, const Packet& x) const { return internal::ppolygamma(n, x); } }; template struct functor_traits > { enum { // Guesstimate Cost = 10 * NumTraits::MulCost + 5 * NumTraits::AddCost, PacketAccess = packet_traits::HasPolygamma }; }; /** \internal * \brief Template functor to compute the Gauss error function of a * scalar * \sa class CwiseUnaryOp, Cwise::erf() */ template struct scalar_erf_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_erf_op) EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using numext::erf; return erf(a); } typedef typename packet_traits::type Packet; EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::perf(a); } }; template struct functor_traits > { enum { // Guesstimate Cost = 10 * NumTraits::MulCost + 5 * NumTraits::AddCost, PacketAccess = packet_traits::HasErf }; }; /** \internal * \brief Template functor to compute the Complementary Error Function * of a scalar * \sa class CwiseUnaryOp, Cwise::erfc() */ template struct scalar_erfc_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_erfc_op) EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using numext::erfc; return erfc(a); } typedef typename packet_traits::type Packet; EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::perfc(a); } }; template struct functor_traits > { enum { // Guesstimate Cost = 10 * NumTraits::MulCost + 5 * NumTraits::AddCost, PacketAccess = packet_traits::HasErfc }; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_SPECIALFUNCTIONS_FUNCTORS_H RcppEigen/inst/include/unsupported/Eigen/src/BVH/0000755000176200001440000000000012253717461021363 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/BVH/BVAlgorithms.h0000644000176200001440000003126012253717461024077 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for 
linear algebra. // // Copyright (C) 2009 Ilya Baran // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BVALGORITHMS_H #define EIGEN_BVALGORITHMS_H namespace Eigen { namespace internal { #ifndef EIGEN_PARSED_BY_DOXYGEN template bool intersect_helper(const BVH &tree, Intersector &intersector, typename BVH::Index root) { typedef typename BVH::Index Index; typedef typename BVH::VolumeIterator VolIter; typedef typename BVH::ObjectIterator ObjIter; VolIter vBegin = VolIter(), vEnd = VolIter(); ObjIter oBegin = ObjIter(), oEnd = ObjIter(); std::vector todo(1, root); while(!todo.empty()) { tree.getChildren(todo.back(), vBegin, vEnd, oBegin, oEnd); todo.pop_back(); for(; vBegin != vEnd; ++vBegin) //go through child volumes if(intersector.intersectVolume(tree.getVolume(*vBegin))) todo.push_back(*vBegin); for(; oBegin != oEnd; ++oBegin) //go through child objects if(intersector.intersectObject(*oBegin)) return true; //intersector said to stop query } return false; } #endif //not EIGEN_PARSED_BY_DOXYGEN template struct intersector_helper1 { intersector_helper1(const Object2 &inStored, Intersector &in) : stored(inStored), intersector(in) {} bool intersectVolume(const Volume1 &vol) { return intersector.intersectVolumeObject(vol, stored); } bool intersectObject(const Object1 &obj) { return intersector.intersectObjectObject(obj, stored); } Object2 stored; Intersector &intersector; private: intersector_helper1& operator=(const intersector_helper1&); }; template struct intersector_helper2 { intersector_helper2(const Object1 &inStored, Intersector &in) : stored(inStored), intersector(in) {} bool intersectVolume(const Volume2 &vol) { return intersector.intersectObjectVolume(stored, vol); } bool intersectObject(const Object2 &obj) { return intersector.intersectObjectObject(stored, obj); } Object1 stored; Intersector &intersector; private: intersector_helper2& operator=(const intersector_helper2&); }; } // end namespace internal /** Given a BVH, runs the query encapsulated by \a intersector. * The Intersector type must provide the following members: \code bool intersectVolume(const BVH::Volume &volume) //returns true if volume intersects the query bool intersectObject(const BVH::Object &object) //returns true if the search should terminate immediately \endcode */ template void BVIntersect(const BVH &tree, Intersector &intersector) { internal::intersect_helper(tree, intersector, tree.getRootIndex()); } /** Given two BVH's, runs the query on their Cartesian product encapsulated by \a intersector. 
* The Intersector type must provide the following members: \code bool intersectVolumeVolume(const BVH1::Volume &v1, const BVH2::Volume &v2) //returns true if product of volumes intersects the query bool intersectVolumeObject(const BVH1::Volume &v1, const BVH2::Object &o2) //returns true if the volume-object product intersects the query bool intersectObjectVolume(const BVH1::Object &o1, const BVH2::Volume &v2) //returns true if the volume-object product intersects the query bool intersectObjectObject(const BVH1::Object &o1, const BVH2::Object &o2) //returns true if the search should terminate immediately \endcode */ template void BVIntersect(const BVH1 &tree1, const BVH2 &tree2, Intersector &intersector) //TODO: tandem descent when it makes sense { typedef typename BVH1::Index Index1; typedef typename BVH2::Index Index2; typedef internal::intersector_helper1 Helper1; typedef internal::intersector_helper2 Helper2; typedef typename BVH1::VolumeIterator VolIter1; typedef typename BVH1::ObjectIterator ObjIter1; typedef typename BVH2::VolumeIterator VolIter2; typedef typename BVH2::ObjectIterator ObjIter2; VolIter1 vBegin1 = VolIter1(), vEnd1 = VolIter1(); ObjIter1 oBegin1 = ObjIter1(), oEnd1 = ObjIter1(); VolIter2 vBegin2 = VolIter2(), vEnd2 = VolIter2(), vCur2 = VolIter2(); ObjIter2 oBegin2 = ObjIter2(), oEnd2 = ObjIter2(), oCur2 = ObjIter2(); std::vector > todo(1, std::make_pair(tree1.getRootIndex(), tree2.getRootIndex())); while(!todo.empty()) { tree1.getChildren(todo.back().first, vBegin1, vEnd1, oBegin1, oEnd1); tree2.getChildren(todo.back().second, vBegin2, vEnd2, oBegin2, oEnd2); todo.pop_back(); for(; vBegin1 != vEnd1; ++vBegin1) { //go through child volumes of first tree const typename BVH1::Volume &vol1 = tree1.getVolume(*vBegin1); for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree if(intersector.intersectVolumeVolume(vol1, tree2.getVolume(*vCur2))) todo.push_back(std::make_pair(*vBegin1, *vCur2)); } for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree Helper1 helper(*oCur2, intersector); if(internal::intersect_helper(tree1, helper, *vBegin1)) return; //intersector said to stop query } } for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree Helper2 helper(*oBegin1, intersector); if(internal::intersect_helper(tree2, helper, *vCur2)) return; //intersector said to stop query } for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree if(intersector.intersectObjectObject(*oBegin1, *oCur2)) return; //intersector said to stop query } } } } namespace internal { #ifndef EIGEN_PARSED_BY_DOXYGEN template typename Minimizer::Scalar minimize_helper(const BVH &tree, Minimizer &minimizer, typename BVH::Index root, typename Minimizer::Scalar minimum) { typedef typename Minimizer::Scalar Scalar; typedef typename BVH::Index Index; typedef std::pair QueueElement; //first element is priority typedef typename BVH::VolumeIterator VolIter; typedef typename BVH::ObjectIterator ObjIter; VolIter vBegin = VolIter(), vEnd = VolIter(); ObjIter oBegin = ObjIter(), oEnd = ObjIter(); std::priority_queue, std::greater > todo; //smallest is at the top todo.push(std::make_pair(Scalar(), root)); while(!todo.empty()) { tree.getChildren(todo.top().second, vBegin, vEnd, oBegin, oEnd); todo.pop(); for(; oBegin != oEnd; ++oBegin) //go through child objects minimum = (std::min)(minimum, 
minimizer.minimumOnObject(*oBegin)); for(; vBegin != vEnd; ++vBegin) { //go through child volumes Scalar val = minimizer.minimumOnVolume(tree.getVolume(*vBegin)); if(val < minimum) todo.push(std::make_pair(val, *vBegin)); } } return minimum; } #endif //not EIGEN_PARSED_BY_DOXYGEN template struct minimizer_helper1 { typedef typename Minimizer::Scalar Scalar; minimizer_helper1(const Object2 &inStored, Minimizer &m) : stored(inStored), minimizer(m) {} Scalar minimumOnVolume(const Volume1 &vol) { return minimizer.minimumOnVolumeObject(vol, stored); } Scalar minimumOnObject(const Object1 &obj) { return minimizer.minimumOnObjectObject(obj, stored); } Object2 stored; Minimizer &minimizer; private: minimizer_helper1& operator=(const minimizer_helper1&); }; template struct minimizer_helper2 { typedef typename Minimizer::Scalar Scalar; minimizer_helper2(const Object1 &inStored, Minimizer &m) : stored(inStored), minimizer(m) {} Scalar minimumOnVolume(const Volume2 &vol) { return minimizer.minimumOnObjectVolume(stored, vol); } Scalar minimumOnObject(const Object2 &obj) { return minimizer.minimumOnObjectObject(stored, obj); } Object1 stored; Minimizer &minimizer; private: minimizer_helper2& operator=(const minimizer_helper2&); }; } // end namespace internal /** Given a BVH, runs the query encapsulated by \a minimizer. * \returns the minimum value. * The Minimizer type must provide the following members: \code typedef Scalar //the numeric type of what is being minimized--not necessarily the Scalar type of the BVH (if it has one) Scalar minimumOnVolume(const BVH::Volume &volume) Scalar minimumOnObject(const BVH::Object &object) \endcode */ template typename Minimizer::Scalar BVMinimize(const BVH &tree, Minimizer &minimizer) { return internal::minimize_helper(tree, minimizer, tree.getRootIndex(), (std::numeric_limits::max)()); } /** Given two BVH's, runs the query on their cartesian product encapsulated by \a minimizer. * \returns the minimum value. 
* The Minimizer type must provide the following members: \code typedef Scalar //the numeric type of what is being minimized--not necessarily the Scalar type of the BVH (if it has one) Scalar minimumOnVolumeVolume(const BVH1::Volume &v1, const BVH2::Volume &v2) Scalar minimumOnVolumeObject(const BVH1::Volume &v1, const BVH2::Object &o2) Scalar minimumOnObjectVolume(const BVH1::Object &o1, const BVH2::Volume &v2) Scalar minimumOnObjectObject(const BVH1::Object &o1, const BVH2::Object &o2) \endcode */ template typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Minimizer &minimizer) { typedef typename Minimizer::Scalar Scalar; typedef typename BVH1::Index Index1; typedef typename BVH2::Index Index2; typedef internal::minimizer_helper1 Helper1; typedef internal::minimizer_helper2 Helper2; typedef std::pair > QueueElement; //first element is priority typedef typename BVH1::VolumeIterator VolIter1; typedef typename BVH1::ObjectIterator ObjIter1; typedef typename BVH2::VolumeIterator VolIter2; typedef typename BVH2::ObjectIterator ObjIter2; VolIter1 vBegin1 = VolIter1(), vEnd1 = VolIter1(); ObjIter1 oBegin1 = ObjIter1(), oEnd1 = ObjIter1(); VolIter2 vBegin2 = VolIter2(), vEnd2 = VolIter2(), vCur2 = VolIter2(); ObjIter2 oBegin2 = ObjIter2(), oEnd2 = ObjIter2(), oCur2 = ObjIter2(); std::priority_queue, std::greater > todo; //smallest is at the top Scalar minimum = (std::numeric_limits::max)(); todo.push(std::make_pair(Scalar(), std::make_pair(tree1.getRootIndex(), tree2.getRootIndex()))); while(!todo.empty()) { tree1.getChildren(todo.top().second.first, vBegin1, vEnd1, oBegin1, oEnd1); tree2.getChildren(todo.top().second.second, vBegin2, vEnd2, oBegin2, oEnd2); todo.pop(); for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree minimum = (std::min)(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2)); } for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree Helper2 helper(*oBegin1, minimizer); minimum = (std::min)(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum)); } } for(; vBegin1 != vEnd1; ++vBegin1) { //go through child volumes of first tree const typename BVH1::Volume &vol1 = tree1.getVolume(*vBegin1); for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree Helper1 helper(*oCur2, minimizer); minimum = (std::min)(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum)); } for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree Scalar val = minimizer.minimumOnVolumeVolume(vol1, tree2.getVolume(*vCur2)); if(val < minimum) todo.push(std::make_pair(val, std::make_pair(*vBegin1, *vCur2))); } } } return minimum; } } // end namespace Eigen #endif // EIGEN_BVALGORITHMS_H RcppEigen/inst/include/unsupported/Eigen/src/BVH/KdBVH.h0000644000176200001440000002164612253717461022443 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Ilya Baran // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
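/** A brief sketch of the query interface documented above, used together with
  * the KdBVH defined below. PointMinimizer is a hypothetical functor; for raw
  * point objects a bounding_box(Object) overload returning a (possibly
  * degenerate) AlignedBox3d is assumed to be in scope.
  *
  * \code
  * #include <unsupported/Eigen/BVH>
  * #include <vector>
  *
  * struct PointMinimizer
  * {
  *   typedef double Scalar;
  *   Eigen::Vector3d query;
  *   // lower bound for everything contained in a node's bounding box
  *   double minimumOnVolume(const Eigen::AlignedBox3d &box) const
  *   { return box.squaredExteriorDistance(query); }
  *   // exact value for a leaf object (here the objects are points)
  *   double minimumOnObject(const Eigen::Vector3d &p) const
  *   { return (p - query).squaredNorm(); }
  * };
  *
  * // std::vector<Eigen::Vector3d> pts = ...;           // the point cloud
  * // Eigen::KdBVH<double, 3, Eigen::Vector3d> tree(pts.begin(), pts.end());
  * // PointMinimizer m; m.query = Eigen::Vector3d(0.1, 0.2, 0.3);
  * // double d2 = Eigen::BVMinimize(tree, m);           // squared distance
  * \endcode
  */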
#ifndef KDBVH_H_INCLUDED #define KDBVH_H_INCLUDED namespace Eigen { namespace internal { //internal pair class for the BVH--used instead of std::pair because of alignment template struct vector_int_pair { EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar, Dim) typedef Matrix VectorType; vector_int_pair(const VectorType &v, int i) : first(v), second(i) {} VectorType first; int second; }; //these templates help the tree initializer get the bounding boxes either from a provided //iterator range or using bounding_box in a unified way template struct get_boxes_helper { void operator()(const ObjectList &objects, BoxIter boxBegin, BoxIter boxEnd, VolumeList &outBoxes) { outBoxes.insert(outBoxes.end(), boxBegin, boxEnd); eigen_assert(outBoxes.size() == objects.size()); } }; template struct get_boxes_helper { void operator()(const ObjectList &objects, int, int, VolumeList &outBoxes) { outBoxes.reserve(objects.size()); for(int i = 0; i < (int)objects.size(); ++i) outBoxes.push_back(bounding_box(objects[i])); } }; } // end namespace internal /** \class KdBVH * \brief A simple bounding volume hierarchy based on AlignedBox * * \param _Scalar The underlying scalar type of the bounding boxes * \param _Dim The dimension of the space in which the hierarchy lives * \param _Object The object type that lives in the hierarchy. It must have value semantics. Either bounding_box(_Object) must * be defined and return an AlignedBox<_Scalar, _Dim> or bounding boxes must be provided to the tree initializer. * * This class provides a simple (as opposed to optimized) implementation of a bounding volume hierarchy analogous to a Kd-tree. * Given a sequence of objects, it computes their bounding boxes, constructs a Kd-tree of their centers * and builds a BVH with the structure of that Kd-tree. When the elements of the tree are too expensive to be copied around, * it is useful for _Object to be a pointer. */ template class KdBVH { public: enum { Dim = _Dim }; typedef _Object Object; typedef std::vector > ObjectList; typedef _Scalar Scalar; typedef AlignedBox Volume; typedef std::vector > VolumeList; typedef int Index; typedef const int *VolumeIterator; //the iterators are just pointers into the tree's vectors typedef const Object *ObjectIterator; KdBVH() {} /** Given an iterator range over \a Object references, constructs the BVH. Requires that bounding_box(Object) return a Volume. */ template KdBVH(Iter begin, Iter end) { init(begin, end, 0, 0); } //int is recognized by init as not being an iterator type /** Given an iterator range over \a Object references and an iterator range over their bounding boxes, constructs the BVH */ template KdBVH(OIter begin, OIter end, BIter boxBegin, BIter boxEnd) { init(begin, end, boxBegin, boxEnd); } /** Given an iterator range over \a Object references, constructs the BVH, overwriting whatever is in there currently. * Requires that bounding_box(Object) return a Volume. */ template void init(Iter begin, Iter end) { init(begin, end, 0, 0); } /** Given an iterator range over \a Object references and an iterator range over their bounding boxes, * constructs the BVH, overwriting whatever is in there currently. 
*/ template void init(OIter begin, OIter end, BIter boxBegin, BIter boxEnd) { objects.clear(); boxes.clear(); children.clear(); objects.insert(objects.end(), begin, end); int n = static_cast(objects.size()); if(n < 2) return; //if we have at most one object, we don't need any internal nodes VolumeList objBoxes; VIPairList objCenters; //compute the bounding boxes depending on BIter type internal::get_boxes_helper()(objects, boxBegin, boxEnd, objBoxes); objCenters.reserve(n); boxes.reserve(n - 1); children.reserve(2 * n - 2); for(int i = 0; i < n; ++i) objCenters.push_back(VIPair(objBoxes[i].center(), i)); build(objCenters, 0, n, objBoxes, 0); //the recursive part of the algorithm ObjectList tmp(n); tmp.swap(objects); for(int i = 0; i < n; ++i) objects[i] = tmp[objCenters[i].second]; } /** \returns the index of the root of the hierarchy */ inline Index getRootIndex() const { return (int)boxes.size() - 1; } /** Given an \a index of a node, on exit, \a outVBegin and \a outVEnd range over the indices of the volume children of the node * and \a outOBegin and \a outOEnd range over the object children of the node */ EIGEN_STRONG_INLINE void getChildren(Index index, VolumeIterator &outVBegin, VolumeIterator &outVEnd, ObjectIterator &outOBegin, ObjectIterator &outOEnd) const { //inlining this function should open lots of optimization opportunities to the compiler if(index < 0) { outVBegin = outVEnd; if(!objects.empty()) outOBegin = &(objects[0]); outOEnd = outOBegin + objects.size(); //output all objects--necessary when the tree has only one object return; } int numBoxes = static_cast(boxes.size()); int idx = index * 2; if(children[idx + 1] < numBoxes) { //second index is always bigger outVBegin = &(children[idx]); outVEnd = outVBegin + 2; outOBegin = outOEnd; } else if(children[idx] >= numBoxes) { //if both children are objects outVBegin = outVEnd; outOBegin = &(objects[children[idx] - numBoxes]); outOEnd = outOBegin + 2; } else { //if the first child is a volume and the second is an object outVBegin = &(children[idx]); outVEnd = outVBegin + 1; outOBegin = &(objects[children[idx + 1] - numBoxes]); outOEnd = outOBegin + 1; } } /** \returns the bounding box of the node at \a index */ inline const Volume &getVolume(Index index) const { return boxes[index]; } private: typedef internal::vector_int_pair VIPair; typedef std::vector > VIPairList; typedef Matrix VectorType; struct VectorComparator //compares vectors, or, more specificall, VIPairs along a particular dimension { VectorComparator(int inDim) : dim(inDim) {} inline bool operator()(const VIPair &v1, const VIPair &v2) const { return v1.first[dim] < v2.first[dim]; } int dim; }; //Build the part of the tree between objects[from] and objects[to] (not including objects[to]). //This routine partitions the objCenters in [from, to) along the dimension dim, recursively constructs //the two halves, and adds their parent node. 
TODO: a cache-friendlier layout void build(VIPairList &objCenters, int from, int to, const VolumeList &objBoxes, int dim) { eigen_assert(to - from > 1); if(to - from == 2) { boxes.push_back(objBoxes[objCenters[from].second].merged(objBoxes[objCenters[from + 1].second])); children.push_back(from + (int)objects.size() - 1); //there are objects.size() - 1 tree nodes children.push_back(from + (int)objects.size()); } else if(to - from == 3) { int mid = from + 2; std::nth_element(objCenters.begin() + from, objCenters.begin() + mid, objCenters.begin() + to, VectorComparator(dim)); //partition build(objCenters, from, mid, objBoxes, (dim + 1) % Dim); int idx1 = (int)boxes.size() - 1; boxes.push_back(boxes[idx1].merged(objBoxes[objCenters[mid].second])); children.push_back(idx1); children.push_back(mid + (int)objects.size() - 1); } else { int mid = from + (to - from) / 2; nth_element(objCenters.begin() + from, objCenters.begin() + mid, objCenters.begin() + to, VectorComparator(dim)); //partition build(objCenters, from, mid, objBoxes, (dim + 1) % Dim); int idx1 = (int)boxes.size() - 1; build(objCenters, mid, to, objBoxes, (dim + 1) % Dim); int idx2 = (int)boxes.size() - 1; boxes.push_back(boxes[idx1].merged(boxes[idx2])); children.push_back(idx1); children.push_back(idx2); } } std::vector children; //children of x are children[2x] and children[2x+1], indices bigger than boxes.size() index into objects. VolumeList boxes; ObjectList objects; }; } // end namespace Eigen #endif //KDBVH_H_INCLUDED RcppEigen/inst/include/unsupported/Eigen/src/MoreVectorization/0000755000176200001440000000000012253717461024427 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/MoreVectorization/MathFunctions.h0000644000176200001440000000573312253717461027372 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Rohit Garg // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
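/** A short sketch of the alternative KdBVH initialization path documented
  * above, where the caller supplies the bounding boxes explicitly instead of
  * relying on a bounding_box(Object) overload; MyObject is a hypothetical
  * user type and exactly one box per object is expected.
  *
  * \code
  * // std::vector<MyObject> objs = ...;                 // the objects
  * // std::vector<Eigen::AlignedBox3d> boxes = ...;     // one box per object
  * // Eigen::KdBVH<double, 3, MyObject> tree;
  * // tree.init(objs.begin(), objs.end(), boxes.begin(), boxes.end());
  * \endcode
  */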
#ifndef EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H #define EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H namespace Eigen { namespace internal { /** \internal \returns the arcsin of \a a (coeff-wise) */ template inline static Packet pasin(Packet a) { return std::asin(a); } #ifdef EIGEN_VECTORIZE_SSE template<> EIGEN_DONT_INLINE Packet4f pasin(Packet4f x) { _EIGEN_DECLARE_CONST_Packet4f(half, 0.5); _EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5); _EIGEN_DECLARE_CONST_Packet4f(3half, 1.5); _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000); _EIGEN_DECLARE_CONST_Packet4f(pi, 3.141592654); _EIGEN_DECLARE_CONST_Packet4f(pi_over_2, 3.141592654*0.5); _EIGEN_DECLARE_CONST_Packet4f(asin1, 4.2163199048E-2); _EIGEN_DECLARE_CONST_Packet4f(asin2, 2.4181311049E-2); _EIGEN_DECLARE_CONST_Packet4f(asin3, 4.5470025998E-2); _EIGEN_DECLARE_CONST_Packet4f(asin4, 7.4953002686E-2); _EIGEN_DECLARE_CONST_Packet4f(asin5, 1.6666752422E-1); Packet4f a = pabs(x);//got the absolute value Packet4f sign_bit= _mm_and_ps(x, p4f_sign_mask);//extracted the sign bit Packet4f z1,z2;//will need them during computation //will compute the two branches for asin //so first compare with half Packet4f branch_mask= _mm_cmpgt_ps(a, p4f_half);//this is to select which branch to take //both will be taken, and finally results will be merged //the branch for values >0.5 { //the core series expansion z1=pmadd(p4f_minus_half,a,p4f_half); Packet4f x1=psqrt(z1); Packet4f s1=pmadd(p4f_asin1, z1, p4f_asin2); Packet4f s2=pmadd(s1, z1, p4f_asin3); Packet4f s3=pmadd(s2,z1, p4f_asin4); Packet4f s4=pmadd(s3,z1, p4f_asin5); Packet4f temp=pmul(s4,z1);//not really a madd but a mul by z so that the next term can be a madd z1=pmadd(temp,x1,x1); z1=padd(z1,z1); z1=psub(p4f_pi_over_2,z1); } { //the core series expansion Packet4f x2=a; z2=pmul(x2,x2); Packet4f s1=pmadd(p4f_asin1, z2, p4f_asin2); Packet4f s2=pmadd(s1, z2, p4f_asin3); Packet4f s3=pmadd(s2,z2, p4f_asin4); Packet4f s4=pmadd(s3,z2, p4f_asin5); Packet4f temp=pmul(s4,z2);//not really a madd but a mul by z so that the next term can be a madd z2=pmadd(temp,x2,x2); } /* select the correct result from the two branch evaluations */ z1 = _mm_and_ps(branch_mask, z1); z2 = _mm_andnot_ps(branch_mask, z2); Packet4f z = _mm_or_ps(z1,z2); /* update the sign */ return _mm_xor_ps(z, sign_bit); } #endif // EIGEN_VECTORIZE_SSE } // end namespace internal } // end namespace Eigen #endif // EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H RcppEigen/inst/include/unsupported/Eigen/KroneckerProduct0000644000176200001440000000166013100733213023350 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_KRONECKER_PRODUCT_MODULE_H #define EIGEN_KRONECKER_PRODUCT_MODULE_H #include "../../Eigen/Core" #include "../../Eigen/src/Core/util/DisableStupidWarnings.h" #include "../../Eigen/src/SparseCore/SparseUtil.h" namespace Eigen { /** * \defgroup KroneckerProduct_Module KroneckerProduct module * * This module contains an experimental Kronecker product implementation. 
* * \code * #include * \endcode */ } // namespace Eigen #include "src/KroneckerProduct/KroneckerTensorProduct.h" #include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_KRONECKER_PRODUCT_MODULE_H RcppEigen/inst/include/unsupported/Eigen/MPRealSupport0000644000176200001440000001636513100733213022611 0ustar liggesusers// This file is part of a joint effort between Eigen, a lightweight C++ template library // for linear algebra, and MPFR C++, a C++ interface to MPFR library (http://www.holoborodko.com/pavel/) // // Copyright (C) 2010-2012 Pavel Holoborodko // Copyright (C) 2010 Konstantin Holoborodko // Copyright (C) 2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MPREALSUPPORT_MODULE_H #define EIGEN_MPREALSUPPORT_MODULE_H #include #include namespace Eigen { /** * \defgroup MPRealSupport_Module MPFRC++ Support module * \code * #include * \endcode * * This module provides support for multi precision floating point numbers * via the
MPFR C++ * library which itself is built upon MPFR/GMP. * * \warning MPFR C++ is licensed under the GPL. * * You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder. * * Here is an example: * \code #include #include #include using namespace mpfr; using namespace Eigen; int main() { // set precision to 256 bits (double has only 53 bits) mpreal::set_default_prec(256); // Declare matrix and vector types with multi-precision scalar type typedef Matrix MatrixXmp; typedef Matrix VectorXmp; MatrixXmp A = MatrixXmp::Random(100,100); VectorXmp b = VectorXmp::Random(100); // Solve Ax=b using LU VectorXmp x = A.lu().solve(b); std::cout << "relative error: " << (A*x - b).norm() / b.norm() << std::endl; return 0; } \endcode * */ template<> struct NumTraits : GenericNumTraits { enum { IsInteger = 0, IsSigned = 1, IsComplex = 0, RequireInitialization = 1, ReadCost = HugeCost, AddCost = HugeCost, MulCost = HugeCost }; typedef mpfr::mpreal Real; typedef mpfr::mpreal NonInteger; static inline Real highest (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::maxval(Precision); } static inline Real lowest (long Precision = mpfr::mpreal::get_default_prec()) { return -mpfr::maxval(Precision); } // Constants static inline Real Pi (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_pi(Precision); } static inline Real Euler (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_euler(Precision); } static inline Real Log2 (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_log2(Precision); } static inline Real Catalan (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_catalan(Precision); } static inline Real epsilon (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(Precision); } static inline Real epsilon (const Real& x) { return mpfr::machine_epsilon(x); } #ifdef MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS static inline int digits10 (long Precision = mpfr::mpreal::get_default_prec()) { return std::numeric_limits::digits10(Precision); } static inline int digits10 (const Real& x) { return std::numeric_limits::digits10(x); } #endif static inline Real dummy_precision() { mpfr_prec_t weak_prec = ((mpfr::mpreal::get_default_prec()-1) * 90) / 100; return mpfr::machine_epsilon(weak_prec); } }; namespace internal { template<> inline mpfr::mpreal random() { return mpfr::random(); } template<> inline mpfr::mpreal random(const mpfr::mpreal& a, const mpfr::mpreal& b) { return a + (b-a) * random(); } inline bool isMuchSmallerThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) { return mpfr::abs(a) <= mpfr::abs(b) * eps; } inline bool isApprox(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) { return mpfr::isEqualFuzzy(a,b,eps); } inline bool isApproxOrLessThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) { return a <= b || mpfr::isEqualFuzzy(a,b,eps); } template<> inline long double cast(const mpfr::mpreal& x) { return x.toLDouble(); } template<> inline double cast(const mpfr::mpreal& x) { return x.toDouble(); } template<> inline long cast(const mpfr::mpreal& x) { return x.toLong(); } template<> inline int cast(const mpfr::mpreal& x) { return int(x.toLong()); } // Specialize GEBP kernel and traits for mpreal (no need for peeling, nor complicated stuff) // This also permits to directly call mpfr's routines and avoid many temporaries produced by mpreal template<> class gebp_traits { public: 
typedef mpfr::mpreal ResScalar; enum { Vectorizable = false, LhsPacketSize = 1, RhsPacketSize = 1, ResPacketSize = 1, NumberOfRegisters = 1, nr = 1, mr = 1, LhsProgress = 1, RhsProgress = 1 }; typedef ResScalar LhsPacket; typedef ResScalar RhsPacket; typedef ResScalar ResPacket; }; template struct gebp_kernel { typedef mpfr::mpreal mpreal; EIGEN_DONT_INLINE void operator()(const DataMapper& res, const mpreal* blockA, const mpreal* blockB, Index rows, Index depth, Index cols, const mpreal& alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0) { if(rows==0 || cols==0 || depth==0) return; mpreal acc1(0,mpfr_get_prec(blockA[0].mpfr_srcptr())), tmp (0,mpfr_get_prec(blockA[0].mpfr_srcptr())); if(strideA==-1) strideA = depth; if(strideB==-1) strideB = depth; for(Index i=0; i // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LEVENBERGMARQUARDT_MODULE #define EIGEN_LEVENBERGMARQUARDT_MODULE // #include #include #include #include #include #include /** * \defgroup LevenbergMarquardt_Module Levenberg-Marquardt module * * \code * #include * \endcode * * */ #include "Eigen/SparseCore" #ifndef EIGEN_PARSED_BY_DOXYGEN #include "src/LevenbergMarquardt/LMqrsolv.h" #include "src/LevenbergMarquardt/LMcovar.h" #include "src/LevenbergMarquardt/LMpar.h" #endif #include "src/LevenbergMarquardt/LevenbergMarquardt.h" #include "src/LevenbergMarquardt/LMonestep.h" #endif // EIGEN_LEVENBERGMARQUARDT_MODULE RcppEigen/inst/include/unsupported/Eigen/IterativeSolvers0000644000176200001440000000241713100733213023377 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ITERATIVE_SOLVERS_MODULE_H #define EIGEN_ITERATIVE_SOLVERS_MODULE_H #include /** * \defgroup IterativeSolvers_Module Iterative solvers module * This module aims to provide various iterative linear and non linear solver algorithms. * It currently provides: * - a constrained conjugate gradient * - a Householder GMRES implementation * \code * #include * \endcode */ //@{ #ifndef EIGEN_MPL2_ONLY #include "src/IterativeSolvers/IterationController.h" #include "src/IterativeSolvers/ConstrainedConjGrad.h" #endif #include "src/IterativeSolvers/IncompleteLU.h" #include "../../Eigen/Jacobi" #include "../../Eigen/Householder" #include "src/IterativeSolvers/GMRES.h" #include "src/IterativeSolvers/DGMRES.h" //#include "src/IterativeSolvers/SSORPreconditioner.h" #include "src/IterativeSolvers/MINRES.h" //@} #endif // EIGEN_ITERATIVE_SOLVERS_MODULE_H RcppEigen/inst/include/unsupported/Eigen/MatrixFunctions0000644000176200001440000004260213100733213023222 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Jitse Niesen // Copyright (C) 2012 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
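/** A compact usage sketch for the iterative solvers listed in the
  * IterativeSolvers module above (GMRES shown; DGMRES and MINRES expose the
  * same IterativeSolverBase interface). The sparse system A, b is assumed to
  * be assembled elsewhere.
  *
  * \code
  * #include <unsupported/Eigen/IterativeSolvers>
  * #include <iostream>
  *
  * // Eigen::SparseMatrix<double> A = ...;   // square system matrix
  * // Eigen::VectorXd b = ...;
  * // Eigen::GMRES<Eigen::SparseMatrix<double> > solver(A);
  * // solver.setTolerance(1e-10);
  * // Eigen::VectorXd x = solver.solve(b);
  * // if (solver.info() == Eigen::Success)
  * //   std::cout << "#iterations: " << solver.iterations()
  * //             << ", estimated error: " << solver.error() << std::endl;
  * \endcode
  */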
#ifndef EIGEN_MATRIX_FUNCTIONS #define EIGEN_MATRIX_FUNCTIONS #include #include #include #include #include /** * \defgroup MatrixFunctions_Module Matrix functions module * \brief This module aims to provide various methods for the computation of * matrix functions. * * To use this module, add * \code * #include * \endcode * at the start of your source file. * * This module defines the following MatrixBase methods. * - \ref matrixbase_cos "MatrixBase::cos()", for computing the matrix cosine * - \ref matrixbase_cosh "MatrixBase::cosh()", for computing the matrix hyperbolic cosine * - \ref matrixbase_exp "MatrixBase::exp()", for computing the matrix exponential * - \ref matrixbase_log "MatrixBase::log()", for computing the matrix logarithm * - \ref matrixbase_pow "MatrixBase::pow()", for computing the matrix power * - \ref matrixbase_matrixfunction "MatrixBase::matrixFunction()", for computing general matrix functions * - \ref matrixbase_sin "MatrixBase::sin()", for computing the matrix sine * - \ref matrixbase_sinh "MatrixBase::sinh()", for computing the matrix hyperbolic sine * - \ref matrixbase_sqrt "MatrixBase::sqrt()", for computing the matrix square root * * These methods are the main entry points to this module. * * %Matrix functions are defined as follows. Suppose that \f$ f \f$ * is an entire function (that is, a function on the complex plane * that is everywhere complex differentiable). Then its Taylor * series * \f[ f(0) + f'(0) x + \frac{f''(0)}{2} x^2 + \frac{f'''(0)}{3!} x^3 + \cdots \f] * converges to \f$ f(x) \f$. In this case, we can define the matrix * function by the same series: * \f[ f(M) = f(0) + f'(0) M + \frac{f''(0)}{2} M^2 + \frac{f'''(0)}{3!} M^3 + \cdots \f] * */ #include "src/MatrixFunctions/MatrixExponential.h" #include "src/MatrixFunctions/MatrixFunction.h" #include "src/MatrixFunctions/MatrixSquareRoot.h" #include "src/MatrixFunctions/MatrixLogarithm.h" #include "src/MatrixFunctions/MatrixPower.h" /** \page matrixbaseextra_page \ingroup MatrixFunctions_Module \section matrixbaseextra MatrixBase methods defined in the MatrixFunctions module The remainder of the page documents the following MatrixBase methods which are defined in the MatrixFunctions module. \subsection matrixbase_cos MatrixBase::cos() Compute the matrix cosine. \code const MatrixFunctionReturnValue MatrixBase::cos() const \endcode \param[in] M a square matrix. \returns expression representing \f$ \cos(M) \f$. This function computes the matrix cosine. Use ArrayBase::cos() for computing the entry-wise cosine. The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos(). \sa \ref matrixbase_sin "sin()" for an example. \subsection matrixbase_cosh MatrixBase::cosh() Compute the matrix hyberbolic cosine. \code const MatrixFunctionReturnValue MatrixBase::cosh() const \endcode \param[in] M a square matrix. \returns expression representing \f$ \cosh(M) \f$ This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cosh(). \sa \ref matrixbase_sinh "sinh()" for an example. \subsection matrixbase_exp MatrixBase::exp() Compute the matrix exponential. \code const MatrixExponentialReturnValue MatrixBase::exp() const \endcode \param[in] M matrix whose exponential is to be computed. \returns expression representing the matrix exponential of \p M. The matrix exponential of \f$ M \f$ is defined by \f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. 
\f] The matrix exponential can be used to solve linear ordinary differential equations: the solution of \f$ y' = My \f$ with the initial condition \f$ y(0) = y_0 \f$ is given by \f$ y(t) = \exp(M) y_0 \f$. The matrix exponential is different from applying the exp function to all the entries in the matrix. Use ArrayBase::exp() if you want to do the latter. The cost of the computation is approximately \f$ 20 n^3 \f$ for matrices of size \f$ n \f$. The number 20 depends weakly on the norm of the matrix. The matrix exponential is computed using the scaling-and-squaring method combined with Padé approximation. The matrix is first rescaled, then the exponential of the reduced matrix is computed approximant, and then the rescaling is undone by repeated squaring. The degree of the Padé approximant is chosen such that the approximation error is less than the round-off error. However, errors may accumulate during the squaring phase. Details of the algorithm can be found in: Nicholas J. Higham, "The scaling and squaring method for the matrix exponential revisited," SIAM J. %Matrix Anal. Applic., 26:1179–1193, 2005. Example: The following program checks that \f[ \exp \left[ \begin{array}{ccc} 0 & \frac14\pi & 0 \\ -\frac14\pi & 0 & 0 \\ 0 & 0 & 0 \end{array} \right] = \left[ \begin{array}{ccc} \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ 0 & 0 & 1 \end{array} \right]. \f] This corresponds to a rotation of \f$ \frac14\pi \f$ radians around the z-axis. \include MatrixExponential.cpp Output: \verbinclude MatrixExponential.out \note \p M has to be a matrix of \c float, \c double, \c long double \c complex, \c complex, or \c complex . \subsection matrixbase_log MatrixBase::log() Compute the matrix logarithm. \code const MatrixLogarithmReturnValue MatrixBase::log() const \endcode \param[in] M invertible matrix whose logarithm is to be computed. \returns expression representing the matrix logarithm root of \p M. The matrix logarithm of \f$ M \f$ is a matrix \f$ X \f$ such that \f$ \exp(X) = M \f$ where exp denotes the matrix exponential. As for the scalar logarithm, the equation \f$ \exp(X) = M \f$ may have multiple solutions; this function returns a matrix whose eigenvalues have imaginary part in the interval \f$ (-\pi,\pi] \f$. The matrix logarithm is different from applying the log function to all the entries in the matrix. Use ArrayBase::log() if you want to do the latter. In the real case, the matrix \f$ M \f$ should be invertible and it should have no eigenvalues which are real and negative (pairs of complex conjugate eigenvalues are allowed). In the complex case, it only needs to be invertible. This function computes the matrix logarithm using the Schur-Parlett algorithm as implemented by MatrixBase::matrixFunction(). The logarithm of an atomic block is computed by MatrixLogarithmAtomic, which uses direct computation for 1-by-1 and 2-by-2 blocks and an inverse scaling-and-squaring algorithm for bigger blocks, with the square roots computed by MatrixBase::sqrt(). Details of the algorithm can be found in Section 11.6.2 of: Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008. ISBN 978-0-898716-46-7. Example: The following program checks that \f[ \log \left[ \begin{array}{ccc} \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ 0 & 0 & 1 \end{array} \right] = \left[ \begin{array}{ccc} 0 & \frac14\pi & 0 \\ -\frac14\pi & 0 & 0 \\ 0 & 0 & 0 \end{array} \right]. 
\f] This corresponds to a rotation of \f$ \frac14\pi \f$ radians around the z-axis. This is the inverse of the example used in the documentation of \ref matrixbase_exp "exp()". \include MatrixLogarithm.cpp Output: \verbinclude MatrixLogarithm.out \note \p M has to be a matrix of \c float, \c double, long double, \c complex, \c complex, or \c complex . \sa MatrixBase::exp(), MatrixBase::matrixFunction(), class MatrixLogarithmAtomic, MatrixBase::sqrt(). \subsection matrixbase_pow MatrixBase::pow() Compute the matrix raised to arbitrary real power. \code const MatrixPowerReturnValue MatrixBase::pow(RealScalar p) const \endcode \param[in] M base of the matrix power, should be a square matrix. \param[in] p exponent of the matrix power. The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$, where exp denotes the matrix exponential, and log denotes the matrix logarithm. This is different from raising all the entries in the matrix to the p-th power. Use ArrayBase::pow() if you want to do the latter. If \p p is complex, the scalar type of \p M should be the type of \p p . \f$ M^p \f$ simply evaluates into \f$ \exp(p \log(M)) \f$. Therefore, the matrix \f$ M \f$ should meet the conditions to be an argument of matrix logarithm. If \p p is real, it is casted into the real scalar type of \p M. Then this function computes the matrix power using the Schur-Padé algorithm as implemented by class MatrixPower. The exponent is split into integral part and fractional part, where the fractional part is in the interval \f$ (-1, 1) \f$. The main diagonal and the first super-diagonal is directly computed. If \p M is singular with a semisimple zero eigenvalue and \p p is positive, the Schur factor \f$ T \f$ is reordered with Givens rotations, i.e. \f[ T = \left[ \begin{array}{cc} T_1 & T_2 \\ 0 & 0 \end{array} \right] \f] where \f$ T_1 \f$ is invertible. Then \f$ T^p \f$ is given by \f[ T^p = \left[ \begin{array}{cc} T_1^p & T_1^{-1} T_1^p T_2 \\ 0 & 0 \end{array}. \right] \f] \warning Fractional power of a matrix with a non-semisimple zero eigenvalue is not well-defined. We introduce an assertion failure against inaccurate result, e.g. \code #include #include int main() { Eigen::Matrix4d A; A << 0, 0, 2, 3, 0, 0, 4, 5, 0, 0, 6, 7, 0, 0, 8, 9; std::cout << A.pow(0.37) << std::endl; // The 1 makes eigenvalue 0 non-semisimple. A.coeffRef(0, 1) = 1; // This fails if EIGEN_NO_DEBUG is undefined. std::cout << A.pow(0.37) << std::endl; return 0; } \endcode Details of the algorithm can be found in: Nicholas J. Higham and Lijing Lin, "A Schur-Padé algorithm for fractional powers of a matrix," SIAM J. %Matrix Anal. Applic., 32(3):1056–1078, 2011. Example: The following program checks that \f[ \left[ \begin{array}{ccc} \cos1 & -\sin1 & 0 \\ \sin1 & \cos1 & 0 \\ 0 & 0 & 1 \end{array} \right]^{\frac14\pi} = \left[ \begin{array}{ccc} \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ 0 & 0 & 1 \end{array} \right]. \f] This corresponds to \f$ \frac14\pi \f$ rotations of 1 radian around the z-axis. \include MatrixPower.cpp Output: \verbinclude MatrixPower.out MatrixBase::pow() is user-friendly. However, there are some circumstances under which you should use class MatrixPower directly. MatrixPower can save the result of Schur decomposition, so it's better for computing various powers for the same matrix. 
Example: \include MatrixPower_optimal.cpp Output: \verbinclude MatrixPower_optimal.out \note \p M has to be a matrix of \c float, \c double, long double, \c complex, \c complex, or \c complex . \sa MatrixBase::exp(), MatrixBase::log(), class MatrixPower. \subsection matrixbase_matrixfunction MatrixBase::matrixFunction() Compute a matrix function. \code const MatrixFunctionReturnValue MatrixBase::matrixFunction(typename internal::stem_function::Scalar>::type f) const \endcode \param[in] M argument of matrix function, should be a square matrix. \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x. \returns expression representing \p f applied to \p M. Suppose that \p M is a matrix whose entries have type \c Scalar. Then, the second argument, \p f, should be a function with prototype \code ComplexScalar f(ComplexScalar, int) \endcode where \c ComplexScalar = \c std::complex if \c Scalar is real (e.g., \c float or \c double) and \c ComplexScalar = \c Scalar if \c Scalar is complex. The return value of \c f(x,n) should be \f$ f^{(n)}(x) \f$, the n-th derivative of f at x. This routine uses the algorithm described in: Philip Davies and Nicholas J. Higham, "A Schur-Parlett algorithm for computing matrix functions", SIAM J. %Matrix Anal. Applic., 25:464–485, 2003. The actual work is done by the MatrixFunction class. Example: The following program checks that \f[ \exp \left[ \begin{array}{ccc} 0 & \frac14\pi & 0 \\ -\frac14\pi & 0 & 0 \\ 0 & 0 & 0 \end{array} \right] = \left[ \begin{array}{ccc} \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ 0 & 0 & 1 \end{array} \right]. \f] This corresponds to a rotation of \f$ \frac14\pi \f$ radians around the z-axis. This is the same example as used in the documentation of \ref matrixbase_exp "exp()". \include MatrixFunction.cpp Output: \verbinclude MatrixFunction.out Note that the function \c expfn is defined for complex numbers \c x, even though the matrix \c A is over the reals. Instead of \c expfn, we could also have used StdStemFunctions::exp: \code A.matrixFunction(StdStemFunctions >::exp, &B); \endcode \subsection matrixbase_sin MatrixBase::sin() Compute the matrix sine. \code const MatrixFunctionReturnValue MatrixBase::sin() const \endcode \param[in] M a square matrix. \returns expression representing \f$ \sin(M) \f$. This function computes the matrix sine. Use ArrayBase::sin() for computing the entry-wise sine. The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin(). Example: \include MatrixSine.cpp Output: \verbinclude MatrixSine.out \subsection matrixbase_sinh MatrixBase::sinh() Compute the matrix hyperbolic sine. \code MatrixFunctionReturnValue MatrixBase::sinh() const \endcode \param[in] M a square matrix. \returns expression representing \f$ \sinh(M) \f$ This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sinh(). Example: \include MatrixSinh.cpp Output: \verbinclude MatrixSinh.out \subsection matrixbase_sqrt MatrixBase::sqrt() Compute the matrix square root. \code const MatrixSquareRootReturnValue MatrixBase::sqrt() const \endcode \param[in] M invertible matrix whose square root is to be computed. \returns expression representing the matrix square root of \p M. The matrix square root of \f$ M \f$ is the matrix \f$ M^{1/2} \f$ whose square is the original matrix; so if \f$ S = M^{1/2} \f$ then \f$ S^2 = M \f$. 
This is different from taking the square root of all the entries in the matrix; use ArrayBase::sqrt() if you want to do the latter. In the real case, the matrix \f$ M \f$ should be invertible and it should have no eigenvalues which are real and negative (pairs of complex conjugate eigenvalues are allowed). In that case, the matrix has a square root which is also real, and this is the square root computed by this function. The matrix square root is computed by first reducing the matrix to quasi-triangular form with the real Schur decomposition. The square root of the quasi-triangular matrix can then be computed directly. The cost is approximately \f$ 25 n^3 \f$ real flops for the real Schur decomposition and \f$ 3\frac13 n^3 \f$ real flops for the remainder (though the computation time in practice is likely more than this indicates). Details of the algorithm can be found in: Nicholas J. Highan, "Computing real square roots of a real matrix", Linear Algebra Appl., 88/89:405–430, 1987. If the matrix is positive-definite symmetric, then the square root is also positive-definite symmetric. In this case, it is best to use SelfAdjointEigenSolver::operatorSqrt() to compute it. In the complex case, the matrix \f$ M \f$ should be invertible; this is a restriction of the algorithm. The square root computed by this algorithm is the one whose eigenvalues have an argument in the interval \f$ (-\frac12\pi, \frac12\pi] \f$. This is the usual branch cut. The computation is the same as in the real case, except that the complex Schur decomposition is used to reduce the matrix to a triangular matrix. The theoretical cost is the same. Details are in: Åke Björck and Sven Hammarling, "A Schur method for the square root of a matrix", Linear Algebra Appl., 52/53:127–140, 1983. Example: The following program checks that the square root of \f[ \left[ \begin{array}{cc} \cos(\frac13\pi) & -\sin(\frac13\pi) \\ \sin(\frac13\pi) & \cos(\frac13\pi) \end{array} \right], \f] corresponding to a rotation over 60 degrees, is a rotation over 30 degrees: \f[ \left[ \begin{array}{cc} \cos(\frac16\pi) & -\sin(\frac16\pi) \\ \sin(\frac16\pi) & \cos(\frac16\pi) \end{array} \right]. \f] \include MatrixSquareRoot.cpp Output: \verbinclude MatrixSquareRoot.out \sa class RealSchur, class ComplexSchur, class MatrixSquareRoot, SelfAdjointEigenSolver::operatorSqrt(). */ #endif // EIGEN_MATRIX_FUNCTIONS RcppEigen/inst/include/unsupported/Eigen/AutoDiff0000644000176200001440000000203612253717461021602 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_AUTODIFF_MODULE #define EIGEN_AUTODIFF_MODULE namespace Eigen { /** * \defgroup AutoDiff_Module Auto Diff module * * This module features forward automatic differentation via a simple * templated scalar type wrapper AutoDiffScalar. * * Warning : this should NOT be confused with numerical differentiation, which * is a different method and has its own module in Eigen : \ref NumericalDiff_Module. 
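 *
 * A minimal sketch of AutoDiffScalar (the constructor and accessors used here are the
 * ones defined in src/AutoDiff/AutoDiffScalar.h, included below):
 * \code
 * #include <unsupported/Eigen/AutoDiff>
 *
 * typedef Eigen::AutoDiffScalar<Eigen::Vector2d> AD;
 * AD x(2.0, 2, 0);        // value 2, first of two independent variables
 * AD y(3.0, 2, 1);        // value 3, second of two independent variables
 * AD f = x * y + x * x;   // f.value() == 10
 *                         // f.derivatives() == (y + 2*x, x) at (2,3), i.e. (7, 2)
 * \endcode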
* * \code * #include * \endcode */ //@{ } #include "src/AutoDiff/AutoDiffScalar.h" // #include "src/AutoDiff/AutoDiffVector.h" #include "src/AutoDiff/AutoDiffJacobian.h" namespace Eigen { //@} } #endif // EIGEN_AUTODIFF_MODULE RcppEigen/inst/include/unsupported/Eigen/ArpackSupport0000644000176200001440000000161112446006401022662 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ARPACKSUPPORT_MODULE_H #define EIGEN_ARPACKSUPPORT_MODULE_H #include #include /** \defgroup ArpackSupport_Module Arpack support module * * This module provides a wrapper to Arpack, a library for sparse eigenvalue decomposition. * * \code * #include * \endcode */ #include #include "src/Eigenvalues/ArpackSelfAdjointEigenSolver.h" #include #endif // EIGEN_ARPACKSUPPORT_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/unsupported/Eigen/NonLinearOptimization0000644000176200001440000001322012253717461024372 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NONLINEAROPTIMIZATION_MODULE #define EIGEN_NONLINEAROPTIMIZATION_MODULE #include #include #include #include #include /** * \defgroup NonLinearOptimization_Module Non linear optimization module * * \code * #include * \endcode * * This module provides implementation of two important algorithms in non linear * optimization. In both cases, we consider a system of non linear functions. Of * course, this should work, and even work very well if those functions are * actually linear. But if this is so, you should probably better use other * methods more fitted to this special case. * * One algorithm allows to find an extremum of such a system (Levenberg * Marquardt algorithm) and the second one is used to find * a zero for the system (Powell hybrid "dogleg" method). * * This code is a port of minpack (http://en.wikipedia.org/wiki/MINPACK). * Minpack is a very famous, old, robust and well-reknown package, written in * fortran. Those implementations have been carefully tuned, tested, and used * for several decades. * * The original fortran code was automatically translated using f2c (http://en.wikipedia.org/wiki/F2c) in C, * then c++, and then cleaned by several different authors. * The last one of those cleanings being our starting point : * http://devernay.free.fr/hacks/cminpack.html * * Finally, we ported this code to Eigen, creating classes and API * coherent with Eigen. When possible, we switched to Eigen * implementation, such as most linear algebra (vectors, matrices, stable norms). * * Doing so, we were very careful to check the tests we setup at the very * beginning, which ensure that the same results are found. * * \section Tests Tests * * The tests are placed in the file unsupported/test/NonLinear.cpp. * * There are two kinds of tests : those that come from examples bundled with cminpack. 
* They guaranty we get the same results as the original algorithms (value for 'x', * for the number of evaluations of the function, and for the number of evaluations * of the jacobian if ever). * * Other tests were added by myself at the very beginning of the * process and check the results for levenberg-marquardt using the reference data * on http://www.itl.nist.gov/div898/strd/nls/nls_main.shtml. Since then i've * carefully checked that the same results were obtained when modifiying the * code. Please note that we do not always get the exact same decimals as they do, * but this is ok : they use 128bits float, and we do the tests using the C type 'double', * which is 64 bits on most platforms (x86 and amd64, at least). * I've performed those tests on several other implementations of levenberg-marquardt, and * (c)minpack performs VERY well compared to those, both in accuracy and speed. * * The documentation for running the tests is on the wiki * http://eigen.tuxfamily.org/index.php?title=Tests * * \section API API : overview of methods * * Both algorithms can use either the jacobian (provided by the user) or compute * an approximation by themselves (actually using Eigen \ref NumericalDiff_Module). * The part of API referring to the latter use 'NumericalDiff' in the method names * (exemple: LevenbergMarquardt.minimizeNumericalDiff() ) * * The methods LevenbergMarquardt.lmder1()/lmdif1()/lmstr1() and * HybridNonLinearSolver.hybrj1()/hybrd1() are specific methods from the original * minpack package that you probably should NOT use until you are porting a code that * was previously using minpack. They just define a 'simple' API with default values * for some parameters. * * All algorithms are provided using Two APIs : * - one where the user inits the algorithm, and uses '*OneStep()' as much as he wants : * this way the caller have control over the steps * - one where the user just calls a method (optimize() or solve()) which will * handle the loop: init + loop until a stop condition is met. Those are provided for * convenience. * * As an example, the method LevenbergMarquardt::minimize() is * implemented as follow : * \code * Status LevenbergMarquardt::minimize(FVectorType &x, const int mode) * { * Status status = minimizeInit(x, mode); * do { * status = minimizeOneStep(x, mode); * } while (status==Running); * return status; * } * \endcode * * \section examples Examples * * The easiest way to understand how to use this module is by looking at the many examples in the file * unsupported/test/NonLinearOptimization.cpp. */ #ifndef EIGEN_PARSED_BY_DOXYGEN #include "src/NonLinearOptimization/qrsolv.h" #include "src/NonLinearOptimization/r1updt.h" #include "src/NonLinearOptimization/r1mpyq.h" #include "src/NonLinearOptimization/rwupdt.h" #include "src/NonLinearOptimization/fdjac1.h" #include "src/NonLinearOptimization/lmpar.h" #include "src/NonLinearOptimization/dogleg.h" #include "src/NonLinearOptimization/covar.h" #include "src/NonLinearOptimization/chkder.h" #endif #include "src/NonLinearOptimization/HybridNonLinearSolver.h" #include "src/NonLinearOptimization/LevenbergMarquardt.h" #endif // EIGEN_NONLINEAROPTIMIZATION_MODULE RcppEigen/inst/include/unsupported/Eigen/Polynomials0000644000176200001440000001124212253717461022406 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_POLYNOMIALS_MODULE_H #define EIGEN_POLYNOMIALS_MODULE_H #include #include #include // Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module #if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2) #ifndef EIGEN_HIDE_HEAVY_CODE #define EIGEN_HIDE_HEAVY_CODE #endif #elif defined EIGEN_HIDE_HEAVY_CODE #undef EIGEN_HIDE_HEAVY_CODE #endif /** * \defgroup Polynomials_Module Polynomials module * \brief This module provides a QR based polynomial solver. * * To use this module, add * \code * #include * \endcode * at the start of your source file. */ #include "src/Polynomials/PolynomialUtils.h" #include "src/Polynomials/Companion.h" #include "src/Polynomials/PolynomialSolver.h" /** \page polynomials Polynomials defines functions for dealing with polynomials and a QR based polynomial solver. \ingroup Polynomials_Module The remainder of the page documents first the functions for evaluating, computing polynomials, computing estimates about polynomials and next the QR based polynomial solver. \section polynomialUtils convenient functions to deal with polynomials \subsection roots_to_monicPolynomial The function \code void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly ) \endcode computes the coefficients \f$ a_i \f$ of \f$ p(x) = a_0 + a_{1}x + ... + a_{n-1}x^{n-1} + x^n \f$ where \f$ p \f$ is known through its roots i.e. \f$ p(x) = (x-r_1)(x-r_2)...(x-r_n) \f$. \subsection poly_eval The function \code T poly_eval( const Polynomials& poly, const T& x ) \endcode evaluates a polynomial at a given point using stabilized Hörner method. The following code: first computes the coefficients in the monomial basis of the monic polynomial that has the provided roots; then, it evaluates the computed polynomial, using a stabilized Hörner method. \include PolynomialUtils1.cpp Output: \verbinclude PolynomialUtils1.out \subsection Cauchy bounds The function \code Real cauchy_max_bound( const Polynomial& poly ) \endcode provides a maximum bound (the Cauchy one: \f$C(p)\f$) for the absolute value of a root of the given polynomial i.e. \f$ \forall r_i \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$, \f$ |r_i| \le C(p) = \sum_{k=0}^{d} \left | \frac{a_k}{a_d} \right | \f$ The leading coefficient \f$ p \f$: should be non zero \f$a_d \neq 0\f$. The function \code Real cauchy_min_bound( const Polynomial& poly ) \endcode provides a minimum bound (the Cauchy one: \f$c(p)\f$) for the absolute value of a non zero root of the given polynomial i.e. \f$ \forall r_i \neq 0 \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$, \f$ |r_i| \ge c(p) = \left( \sum_{k=0}^{d} \left | \frac{a_k}{a_0} \right | \right)^{-1} \f$ \section QR polynomial solver class Computes the complex roots of a polynomial by computing the eigenvalues of the associated companion matrix with the QR algorithm. The roots of \f$ p(x) = a_0 + a_1 x + a_2 x^2 + a_{3} x^3 + x^4 \f$ are the eigenvalues of \f$ \left [ \begin{array}{cccc} 0 & 0 & 0 & a_0 \\ 1 & 0 & 0 & a_1 \\ 0 & 1 & 0 & a_2 \\ 0 & 0 & 1 & a_3 \end{array} \right ] \f$ However, the QR algorithm is not guaranteed to converge when there are several eigenvalues with same modulus. Therefore the current polynomial solver is guaranteed to provide a correct result only when the complex roots \f$r_1,r_2,...,r_d\f$ have distinct moduli i.e. \f$ \forall i,j \in [1;d],~ \| r_i \| \neq \| r_j \| \f$. 
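A minimal sketch of the solver (building a polynomial from known roots with roots_to_monicPolynomial(), then recovering them; the accuracy caveats follow below):

\code
#include <unsupported/Eigen/Polynomials>
#include <iostream>

int main()
{
  Eigen::Vector4d roots(1.0, -2.0, 3.0, 0.5);
  Eigen::Matrix<double, 5, 1> coeffs;
  Eigen::roots_to_monicPolynomial(roots, coeffs);        // a_0 + a_1 x + ... + a_3 x^3 + x^4
  Eigen::PolynomialSolver<double, 4> solver(coeffs);
  std::cout << solver.roots().transpose() << std::endl;  // complex roots, in some order
  return 0;
}
\endcode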
With 32bit (float) floating types this problem shows up frequently. However, almost always, correct accuracy is reached even in these cases for 64bit (double) floating types and small polynomial degree (<20). \include PolynomialSolver1.cpp In the above example: -# a simple use of the polynomial solver is shown; -# the accuracy problem with the QR algorithm is presented: a polynomial with almost conjugate roots is provided to the solver. Those roots have almost same module therefore the QR algorithm failed to converge: the accuracy of the last root is bad; -# a simple way to circumvent the problem is shown: use doubles instead of floats. Output: \verbinclude PolynomialSolver1.out */ #include #endif // EIGEN_POLYNOMIALS_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/unsupported/Eigen/FFT0000644000176200001440000003313412253717461020523 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Mark Borgerding mark a borgerding net // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_FFT_H #define EIGEN_FFT_H #include #include #include #include /** * \defgroup FFT_Module Fast Fourier Transform module * * \code * #include * \endcode * * This module provides Fast Fourier transformation, with a configurable backend * implementation. * * The default implementation is based on kissfft. It is a small, free, and * reasonably efficient default. * * There are currently two implementation backend: * * - fftw (http://www.fftw.org) : faster, GPL -- incompatible with Eigen in LGPL form, bigger code size. * - MKL (http://en.wikipedia.org/wiki/Math_Kernel_Library) : fastest, commercial -- may be incompatible with Eigen in GPL form. * * \section FFTDesign Design * * The following design decisions were made concerning scaling and * half-spectrum for real FFT. * * The intent is to facilitate generic programming and ease migrating code * from Matlab/octave. * We think the default behavior of Eigen/FFT should favor correctness and * generality over speed. Of course, the caller should be able to "opt-out" from this * behavior and get the speed increase if they want it. * * 1) %Scaling: * Other libraries (FFTW,IMKL,KISSFFT) do not perform scaling, so there * is a constant gain incurred after the forward&inverse transforms , so * IFFT(FFT(x)) = Kx; this is done to avoid a vector-by-value multiply. * The downside is that algorithms that worked correctly in Matlab/octave * don't behave the same way once implemented in C++. * * How Eigen/FFT differs: invertible scaling is performed so IFFT( FFT(x) ) = x. * * 2) Real FFT half-spectrum * Other libraries use only half the frequency spectrum (plus one extra * sample for the Nyquist bin) for a real FFT, the other half is the * conjugate-symmetric of the first half. This saves them a copy and some * memory. The downside is the caller needs to have special logic for the * number of bins in complex vs real. * * How Eigen/FFT differs: The full spectrum is returned from the forward * transform. This facilitates generic template programming by obviating * separate specializations for real vs complex. On the inverse * transform, only half the spectrum is actually used if the output type is real. 
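 *
 * A minimal round-trip sketch, assuming the std::vector overloads of fwd() and inv()
 * defined further below in this header:
 * \code
 * #include <unsupported/Eigen/FFT>
 * #include <vector>
 * #include <complex>
 *
 * Eigen::FFT<float> fft;
 * std::vector<float> timevec(32, 1.0f);        // some real-valued signal
 * std::vector<std::complex<float> > freqvec;
 * fft.fwd(freqvec, timevec);                   // full spectrum returned by default
 * std::vector<float> back;
 * fft.inv(back, freqvec);                      // scaled so that back ~= timevec
 * \endcode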
*/ #ifdef EIGEN_FFTW_DEFAULT // FFTW: faster, GPL -- incompatible with Eigen in LGPL form, bigger code size # include # include "src/FFT/ei_fftw_impl.h" namespace Eigen { //template typedef struct internal::fftw_impl default_fft_impl; this does not work template struct default_fft_impl : public internal::fftw_impl {}; } #elif defined EIGEN_MKL_DEFAULT // TODO // intel Math Kernel Library: fastest, commercial -- may be incompatible with Eigen in GPL form # include "src/FFT/ei_imklfft_impl.h" namespace Eigen { template struct default_fft_impl : public internal::imklfft_impl {}; } #else // internal::kissfft_impl: small, free, reasonably efficient default, derived from kissfft // # include "src/FFT/ei_kissfft_impl.h" namespace Eigen { template struct default_fft_impl : public internal::kissfft_impl {}; } #endif namespace Eigen { // template struct fft_fwd_proxy; template struct fft_inv_proxy; namespace internal { template struct traits< fft_fwd_proxy > { typedef typename T_SrcMat::PlainObject ReturnType; }; template struct traits< fft_inv_proxy > { typedef typename T_SrcMat::PlainObject ReturnType; }; } template struct fft_fwd_proxy : public ReturnByValue > { typedef DenseIndex Index; fft_fwd_proxy(const T_SrcMat& src,T_FftIfc & fft, Index nfft) : m_src(src),m_ifc(fft), m_nfft(nfft) {} template void evalTo(T_DestMat& dst) const; Index rows() const { return m_src.rows(); } Index cols() const { return m_src.cols(); } protected: const T_SrcMat & m_src; T_FftIfc & m_ifc; Index m_nfft; private: fft_fwd_proxy& operator=(const fft_fwd_proxy&); }; template struct fft_inv_proxy : public ReturnByValue > { typedef DenseIndex Index; fft_inv_proxy(const T_SrcMat& src,T_FftIfc & fft, Index nfft) : m_src(src),m_ifc(fft), m_nfft(nfft) {} template void evalTo(T_DestMat& dst) const; Index rows() const { return m_src.rows(); } Index cols() const { return m_src.cols(); } protected: const T_SrcMat & m_src; T_FftIfc & m_ifc; Index m_nfft; private: fft_inv_proxy& operator=(const fft_inv_proxy&); }; template > class FFT { public: typedef T_Impl impl_type; typedef DenseIndex Index; typedef typename impl_type::Scalar Scalar; typedef typename impl_type::Complex Complex; enum Flag { Default=0, // goof proof Unscaled=1, HalfSpectrum=2, // SomeOtherSpeedOptimization=4 Speedy=32767 }; FFT( const impl_type & impl=impl_type() , Flag flags=Default ) :m_impl(impl),m_flag(flags) { } inline bool HasFlag(Flag f) const { return (m_flag & (int)f) == f;} inline void SetFlag(Flag f) { m_flag |= (int)f;} inline void ClearFlag(Flag f) { m_flag &= (~(int)f);} inline void fwd( Complex * dst, const Scalar * src, Index nfft) { m_impl.fwd(dst,src,static_cast(nfft)); if ( HasFlag(HalfSpectrum) == false) ReflectSpectrum(dst,nfft); } inline void fwd( Complex * dst, const Complex * src, Index nfft) { m_impl.fwd(dst,src,static_cast(nfft)); } /* inline void fwd2(Complex * dst, const Complex * src, int n0,int n1) { m_impl.fwd2(dst,src,n0,n1); } */ template inline void fwd( std::vector & dst, const std::vector<_Input> & src) { if ( NumTraits<_Input>::IsComplex == 0 && HasFlag(HalfSpectrum) ) dst.resize( (src.size()>>1)+1); // half the bins + Nyquist bin else dst.resize(src.size()); fwd(&dst[0],&src[0],src.size()); } template inline void fwd( MatrixBase & dst, const MatrixBase & src, Index nfft=-1) { typedef typename ComplexDerived::Scalar dst_type; typedef typename InputDerived::Scalar src_type; EIGEN_STATIC_ASSERT_VECTOR_ONLY(InputDerived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(ComplexDerived) 
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(ComplexDerived,InputDerived) // size at compile-time EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) EIGEN_STATIC_ASSERT(int(InputDerived::Flags)&int(ComplexDerived::Flags)&DirectAccessBit, THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES) if (nfft<1) nfft = src.size(); if ( NumTraits< src_type >::IsComplex == 0 && HasFlag(HalfSpectrum) ) dst.derived().resize( (nfft>>1)+1); else dst.derived().resize(nfft); if ( src.innerStride() != 1 || src.size() < nfft ) { Matrix tmp; if (src.size() inline fft_fwd_proxy< MatrixBase, FFT > fwd( const MatrixBase & src, Index nfft=-1) { return fft_fwd_proxy< MatrixBase ,FFT >( src, *this,nfft ); } template inline fft_inv_proxy< MatrixBase, FFT > inv( const MatrixBase & src, Index nfft=-1) { return fft_inv_proxy< MatrixBase ,FFT >( src, *this,nfft ); } inline void inv( Complex * dst, const Complex * src, Index nfft) { m_impl.inv( dst,src,static_cast(nfft) ); if ( HasFlag( Unscaled ) == false) scale(dst,Scalar(1./nfft),nfft); // scale the time series } inline void inv( Scalar * dst, const Complex * src, Index nfft) { m_impl.inv( dst,src,static_cast(nfft) ); if ( HasFlag( Unscaled ) == false) scale(dst,Scalar(1./nfft),nfft); // scale the time series } template inline void inv( MatrixBase & dst, const MatrixBase & src, Index nfft=-1) { typedef typename ComplexDerived::Scalar src_type; typedef typename OutputDerived::Scalar dst_type; const bool realfft= (NumTraits::IsComplex == 0); EIGEN_STATIC_ASSERT_VECTOR_ONLY(OutputDerived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(ComplexDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(ComplexDerived,OutputDerived) // size at compile-time EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) EIGEN_STATIC_ASSERT(int(OutputDerived::Flags)&int(ComplexDerived::Flags)&DirectAccessBit, THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES) if (nfft<1) { //automatic FFT size determination if ( realfft && HasFlag(HalfSpectrum) ) nfft = 2*(src.size()-1); //assume even fft size else nfft = src.size(); } dst.derived().resize( nfft ); // check for nfft that does not fit the input data size Index resize_input= ( realfft && HasFlag(HalfSpectrum) ) ? 
( (nfft/2+1) - src.size() ) : ( nfft - src.size() ); if ( src.innerStride() != 1 || resize_input ) { // if the vector is strided, then we need to copy it to a packed temporary Matrix tmp; if ( resize_input ) { size_t ncopy = (std::min)(src.size(),src.size() + resize_input); tmp.setZero(src.size() + resize_input); if ( realfft && HasFlag(HalfSpectrum) ) { // pad at the Nyquist bin tmp.head(ncopy) = src.head(ncopy); tmp(ncopy-1) = real(tmp(ncopy-1)); // enforce real-only Nyquist bin }else{ size_t nhead,ntail; nhead = 1+ncopy/2-1; // range [0:pi) ntail = ncopy/2-1; // range (-pi:0) tmp.head(nhead) = src.head(nhead); tmp.tail(ntail) = src.tail(ntail); if (resize_input<0) { //shrinking -- create the Nyquist bin as the average of the two bins that fold into it tmp(nhead) = ( src(nfft/2) + src( src.size() - nfft/2 ) )*src_type(.5); }else{ // expanding -- split the old Nyquist bin into two halves tmp(nhead) = src(nhead) * src_type(.5); tmp(tmp.size()-nhead) = tmp(nhead); } } }else{ tmp = src; } inv( &dst[0],&tmp[0], nfft); }else{ inv( &dst[0],&src[0], nfft); } } template inline void inv( std::vector<_Output> & dst, const std::vector & src,Index nfft=-1) { if (nfft<1) nfft = ( NumTraits<_Output>::IsComplex == 0 && HasFlag(HalfSpectrum) ) ? 2*(src.size()-1) : src.size(); dst.resize( nfft ); inv( &dst[0],&src[0],nfft); } /* // TODO: multi-dimensional FFTs inline void inv2(Complex * dst, const Complex * src, int n0,int n1) { m_impl.inv2(dst,src,n0,n1); if ( HasFlag( Unscaled ) == false) scale(dst,1./(n0*n1),n0*n1); } */ inline impl_type & impl() {return m_impl;} private: template inline void scale(T_Data * x,Scalar s,Index nx) { #if 1 for (int k=0;k::Map(x,nx) *= s; else Matrix::MapAligned(x,nx) *= s; //Matrix::Map(x,nx) * s; #endif } inline void ReflectSpectrum(Complex * freq, Index nfft) { // create the implicit right-half spectrum (conjugate-mirror of the left-half) Index nhbins=(nfft>>1)+1; for (Index k=nhbins;k < nfft; ++k ) freq[k] = conj(freq[nfft-k]); } impl_type m_impl; int m_flag; }; template template inline void fft_fwd_proxy::evalTo(T_DestMat& dst) const { m_ifc.fwd( dst, m_src, m_nfft); } template template inline void fft_inv_proxy::evalTo(T_DestMat& dst) const { m_ifc.inv( dst, m_src, m_nfft); } } #endif /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/unsupported/Eigen/SparseExtra0000644000176200001440000000245413100733213022327 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_EXTRA_MODULE_H #define EIGEN_SPARSE_EXTRA_MODULE_H #include "../../Eigen/Sparse" #include "../../Eigen/src/Core/util/DisableStupidWarnings.h" #include #include #include #include #include #include #include #ifdef EIGEN_GOOGLEHASH_SUPPORT #include #endif /** * \defgroup SparseExtra_Module SparseExtra module * * This module contains some experimental features extending the sparse module. 
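 *
 * For instance, the MarketIO helpers included below read and write sparse matrices in
 * Matrix Market format. A minimal sketch (the file names are purely illustrative):
 * \code
 * #include <unsupported/Eigen/SparseExtra>
 *
 * Eigen::SparseMatrix<double> A;
 * Eigen::loadMarket(A, "matrix.mtx");   // read a Matrix Market file
 * Eigen::saveMarket(A, "copy.mtx");     // write it back out
 * \endcode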
* * \code * #include * \endcode */ #include "src/SparseExtra/DynamicSparseMatrix.h" #include "src/SparseExtra/BlockOfDynamicSparseMatrix.h" #include "src/SparseExtra/RandomSetter.h" #include "src/SparseExtra/MarketIO.h" #if !defined(_WIN32) #include #include "src/SparseExtra/MatrixMarketIterator.h" #endif #include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SPARSE_EXTRA_MODULE_H RcppEigen/inst/include/unsupported/Eigen/EulerAngles0000644000176200001440000000211613100733213022267 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Tal Hadad // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EULERANGLES_MODULE_H #define EIGEN_EULERANGLES_MODULE_H #include "Eigen/Core" #include "Eigen/Geometry" #include "Eigen/src/Core/util/DisableStupidWarnings.h" namespace Eigen { /** * \defgroup EulerAngles_Module EulerAngles module * \brief This module provides generic euler angles rotation. * * Euler angles are a way to represent 3D rotation. * * In order to use this module in your code, include this header: * \code * #include * \endcode * * See \ref EulerAngles for more information. * */ } #include "src/EulerAngles/EulerSystem.h" #include "src/EulerAngles/EulerAngles.h" #include "Eigen/src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_EULERANGLES_MODULE_H RcppEigen/inst/include/unsupported/Eigen/Skyline0000644000176200001440000000162012253717461021515 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SKYLINE_MODULE_H #define EIGEN_SKYLINE_MODULE_H #include "Eigen/Core" #include "Eigen/src/Core/util/DisableStupidWarnings.h" #include #include #include #include /** * \defgroup Skyline_Module Skyline module * * * * */ #include "src/Skyline/SkylineUtil.h" #include "src/Skyline/SkylineMatrixBase.h" #include "src/Skyline/SkylineStorage.h" #include "src/Skyline/SkylineMatrix.h" #include "src/Skyline/SkylineInplaceLU.h" #include "src/Skyline/SkylineProduct.h" #include "Eigen/src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SKYLINE_MODULE_H RcppEigen/inst/include/unsupported/Eigen/OpenGLSupport0000644000176200001440000004516213100733213022612 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_OPENGL_MODULE #define EIGEN_OPENGL_MODULE #include #if defined(__APPLE_CC__) #include #else #include #endif namespace Eigen { /** * \defgroup OpenGLSUpport_Module OpenGL Support module * * This module provides wrapper functions for a couple of OpenGL functions * which simplify the way to pass Eigen's object to openGL. * Here is an exmaple: * * \code * // You need to add path_to_eigen/unsupported to your include path. * #include * // ... * Vector3f x, y; * Matrix3f rot; * * glVertex(y + x * rot); * * Quaternion q; * glRotate(q); * * // ... 
* \endcode * */ //@{ #define EIGEN_GL_FUNC_DECLARATION(FUNC) \ namespace internal { \ template< typename XprType, \ typename Scalar = typename XprType::Scalar, \ int Rows = XprType::RowsAtCompileTime, \ int Cols = XprType::ColsAtCompileTime, \ bool IsGLCompatible = bool(internal::evaluator::Flags&LinearAccessBit) \ && bool(XprType::Flags&DirectAccessBit) \ && (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \ \ template \ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(const XprType& p) { \ EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)::type>::run(p); } \ }; \ } \ \ template inline void FUNC(const Eigen::DenseBase& p) { \ EIGEN_CAT(EIGEN_CAT(internal::gl_,FUNC),_impl)::run(p.derived()); \ } #define EIGEN_GL_FUNC_SPECIALIZATION_MAT(FUNC,SCALAR,ROWS,COLS,SUFFIX) \ namespace internal { \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ }; \ } #define EIGEN_GL_FUNC_SPECIALIZATION_VEC(FUNC,SCALAR,SIZE,SUFFIX) \ namespace internal { \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ }; \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ }; \ } EIGEN_GL_FUNC_DECLARATION (glVertex) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 2,2iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 2,2sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 2,2fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 2,2dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 3,3iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 3,3sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 3,3dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 4,4iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 4,4sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 4,4fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 4,4dv) EIGEN_GL_FUNC_DECLARATION (glTexCoord) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 2,2iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 2,2sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 2,2fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 2,2dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 3,3iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 3,3sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 3,3dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 4,4iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 4,4sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 4,4fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 4,4dv) EIGEN_GL_FUNC_DECLARATION (glColor) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 2,2iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 2,2sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 2,2fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 2,2dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 3,3iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 3,3sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 3,3dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 4,4iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 4,4sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 4,4fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 
4,4dv) EIGEN_GL_FUNC_DECLARATION (glNormal) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,int, 3,3iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,short, 3,3sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,double, 3,3dv) inline void glScale2fv(const float* v) { glScalef(v[0], v[1], 1.f); } inline void glScale2dv(const double* v) { glScaled(v[0], v[1], 1.0); } inline void glScale3fv(const float* v) { glScalef(v[0], v[1], v[2]); } inline void glScale3dv(const double* v) { glScaled(v[0], v[1], v[2]); } EIGEN_GL_FUNC_DECLARATION (glScale) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,float, 2,2fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,double, 2,2dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,double, 3,3dv) template void glScale(const UniformScaling& s) { glScale(Matrix::Constant(s.factor())); } inline void glTranslate2fv(const float* v) { glTranslatef(v[0], v[1], 0.f); } inline void glTranslate2dv(const double* v) { glTranslated(v[0], v[1], 0.0); } inline void glTranslate3fv(const float* v) { glTranslatef(v[0], v[1], v[2]); } inline void glTranslate3dv(const double* v) { glTranslated(v[0], v[1], v[2]); } EIGEN_GL_FUNC_DECLARATION (glTranslate) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,float, 2,2fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,double, 2,2dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,double, 3,3dv) template void glTranslate(const Translation& t) { glTranslate(t.vector()); } template void glTranslate(const Translation& t) { glTranslate(t.vector()); } EIGEN_GL_FUNC_DECLARATION (glMultMatrix) EIGEN_GL_FUNC_SPECIALIZATION_MAT(glMultMatrix,float, 4,4,f) EIGEN_GL_FUNC_SPECIALIZATION_MAT(glMultMatrix,double, 4,4,d) template void glMultMatrix(const Transform& t) { glMultMatrix(t.matrix()); } template void glMultMatrix(const Transform& t) { glMultMatrix(t.matrix()); } template void glMultMatrix(const Transform& t) { glMultMatrix(Transform(t).matrix()); } EIGEN_GL_FUNC_DECLARATION (glLoadMatrix) EIGEN_GL_FUNC_SPECIALIZATION_MAT(glLoadMatrix,float, 4,4,f) EIGEN_GL_FUNC_SPECIALIZATION_MAT(glLoadMatrix,double, 4,4,d) template void glLoadMatrix(const Transform& t) { glLoadMatrix(t.matrix()); } template void glLoadMatrix(const Transform& t) { glLoadMatrix(t.matrix()); } template void glLoadMatrix(const Transform& t) { glLoadMatrix(Transform(t).matrix()); } inline void glRotate(const Rotation2D& rot) { glRotatef(rot.angle()*180.f/float(EIGEN_PI), 0.f, 0.f, 1.f); } inline void glRotate(const Rotation2D& rot) { glRotated(rot.angle()*180.0/EIGEN_PI, 0.0, 0.0, 1.0); } template void glRotate(const RotationBase& rot) { Transform tr(rot); glMultMatrix(tr.matrix()); } #define EIGEN_GL_MAKE_CONST_const const #define EIGEN_GL_MAKE_CONST__ #define EIGEN_GL_EVAL(X) X #define EIGEN_GL_FUNC1_DECLARATION(FUNC,ARG1,CONST) \ namespace internal { \ template< typename XprType, \ typename Scalar = typename XprType::Scalar, \ int Rows = XprType::RowsAtCompileTime, \ int Cols = XprType::ColsAtCompileTime, \ bool IsGLCompatible = bool(internal::evaluator::Flags&LinearAccessBit) \ && bool(XprType::Flags&DirectAccessBit) \ && (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \ \ template \ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(ARG1 a,EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { \ EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)::type>::run(a,p); } \ }; \ } \ \ 
template inline void FUNC(ARG1 a,EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) Eigen::DenseBase& p) { \ EIGEN_CAT(EIGEN_CAT(internal::gl_,FUNC),_impl)::run(a,p.derived()); \ } #define EIGEN_GL_FUNC1_SPECIALIZATION_MAT(FUNC,ARG1,CONST,SCALAR,ROWS,COLS,SUFFIX) \ namespace internal { \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ }; \ } #define EIGEN_GL_FUNC1_SPECIALIZATION_VEC(FUNC,ARG1,CONST,SCALAR,SIZE,SUFFIX) \ namespace internal { \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ }; \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ }; \ } EIGEN_GL_FUNC1_DECLARATION (glGet,GLenum,_) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,float, 4,4,Floatv) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,double, 4,4,Doublev) // glUniform API #ifdef GL_VERSION_2_0 inline void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); } inline void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); } inline void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); } inline void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); } inline void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); } inline void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); } inline void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); } inline void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); } inline void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); } EIGEN_GL_FUNC1_DECLARATION (glUniform,GLint,const) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 2,2fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 2,2iv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 3,3fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 3,3iv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 4,4fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 4,4iv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,2,Matrix2fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,3,Matrix3fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,4,Matrix4fv_ei) #endif #ifdef GL_VERSION_2_1 inline void glUniformMatrix2x3fv_ei(GLint loc, const float* v) { glUniformMatrix2x3fv(loc,1,false,v); } inline void glUniformMatrix3x2fv_ei(GLint loc, const float* v) { glUniformMatrix3x2fv(loc,1,false,v); } inline void glUniformMatrix2x4fv_ei(GLint loc, const float* v) { glUniformMatrix2x4fv(loc,1,false,v); } inline void glUniformMatrix4x2fv_ei(GLint loc, const float* v) { glUniformMatrix4x2fv(loc,1,false,v); } inline void glUniformMatrix3x4fv_ei(GLint loc, const float* v) { glUniformMatrix3x4fv(loc,1,false,v); } inline void glUniformMatrix4x3fv_ei(GLint loc, const float* v) { glUniformMatrix4x3fv(loc,1,false,v); } EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,3,Matrix2x3fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,2,Matrix3x2fv_ei) 
EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,4,Matrix2x4fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,2,Matrix4x2fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,4,Matrix3x4fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,3,Matrix4x3fv_ei) #endif #ifdef GL_VERSION_3_0 inline void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); } inline void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); } inline void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); } EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 2,2uiv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 3,3uiv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 4,4uiv_ei) #endif #ifdef GL_ARB_gpu_shader_fp64 inline void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); } inline void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); } inline void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); } EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 2,2dv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 3,3dv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 4,4dv_ei) #endif //@} } #endif // EIGEN_OPENGL_MODULE RcppEigen/inst/include/unsupported/Eigen/AlignedVector30000644000176200001440000001371713100733213022703 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ALIGNED_VECTOR3 #define EIGEN_ALIGNED_VECTOR3 #include namespace Eigen { /** * \defgroup AlignedVector3_Module Aligned vector3 module * * \code * #include * \endcode */ //@{ /** \class AlignedVector3 * * \brief A vectorization friendly 3D vector * * This class represents a 3D vector internally using a 4D vector * such that vectorization can be seamlessly enabled. Of course, * the same result can be achieved by directly using a 4D vector. * This class makes this process simpler. 
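 *
 * A minimal sketch using the operations defined below (construction, dot, cross,
 * arithmetic and normalization):
 * \code
 * #include <unsupported/Eigen/AlignedVector3>
 *
 * Eigen::AlignedVector3<float> a(1.f, 2.f, 3.f), b(4.f, 5.f, 6.f);
 * float d = a.dot(b);                                      // 32
 * Eigen::AlignedVector3<float> c = a.cross(b) + 2.f * b;
 * c.normalize();
 * \endcode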
* */ // TODO specialize Cwise template class AlignedVector3; namespace internal { template struct traits > : traits > { }; } template class AlignedVector3 : public MatrixBase > { typedef Matrix<_Scalar,4,1> CoeffType; CoeffType m_coeffs; public: typedef MatrixBase > Base; EIGEN_DENSE_PUBLIC_INTERFACE(AlignedVector3) using Base::operator*; inline Index rows() const { return 3; } inline Index cols() const { return 1; } Scalar* data() { return m_coeffs.data(); } const Scalar* data() const { return m_coeffs.data(); } Index innerStride() const { return 1; } Index outerStride() const { return 3; } inline const Scalar& coeff(Index row, Index col) const { return m_coeffs.coeff(row, col); } inline Scalar& coeffRef(Index row, Index col) { return m_coeffs.coeffRef(row, col); } inline const Scalar& coeff(Index index) const { return m_coeffs.coeff(index); } inline Scalar& coeffRef(Index index) { return m_coeffs.coeffRef(index);} inline AlignedVector3(const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, Scalar(0)) {} inline AlignedVector3(const AlignedVector3& other) : Base(), m_coeffs(other.m_coeffs) {} template struct generic_assign_selector {}; template struct generic_assign_selector { inline static void run(AlignedVector3& dest, const XprType& src) { dest.m_coeffs = src; } }; template struct generic_assign_selector { inline static void run(AlignedVector3& dest, const XprType& src) { dest.m_coeffs.template head<3>() = src; dest.m_coeffs.w() = Scalar(0); } }; template inline AlignedVector3(const MatrixBase& other) { generic_assign_selector::run(*this,other.derived()); } inline AlignedVector3& operator=(const AlignedVector3& other) { m_coeffs = other.m_coeffs; return *this; } template inline AlignedVector3& operator=(const MatrixBase& other) { generic_assign_selector::run(*this,other.derived()); return *this; } inline AlignedVector3 operator+(const AlignedVector3& other) const { return AlignedVector3(m_coeffs + other.m_coeffs); } inline AlignedVector3& operator+=(const AlignedVector3& other) { m_coeffs += other.m_coeffs; return *this; } inline AlignedVector3 operator-(const AlignedVector3& other) const { return AlignedVector3(m_coeffs - other.m_coeffs); } inline AlignedVector3 operator-=(const AlignedVector3& other) { m_coeffs -= other.m_coeffs; return *this; } inline AlignedVector3 operator*(const Scalar& s) const { return AlignedVector3(m_coeffs * s); } inline friend AlignedVector3 operator*(const Scalar& s,const AlignedVector3& vec) { return AlignedVector3(s * vec.m_coeffs); } inline AlignedVector3& operator*=(const Scalar& s) { m_coeffs *= s; return *this; } inline AlignedVector3 operator/(const Scalar& s) const { return AlignedVector3(m_coeffs / s); } inline AlignedVector3& operator/=(const Scalar& s) { m_coeffs /= s; return *this; } inline Scalar dot(const AlignedVector3& other) const { eigen_assert(m_coeffs.w()==Scalar(0)); eigen_assert(other.m_coeffs.w()==Scalar(0)); return m_coeffs.dot(other.m_coeffs); } inline void normalize() { m_coeffs /= norm(); } inline AlignedVector3 normalized() const { return AlignedVector3(m_coeffs / norm()); } inline Scalar sum() const { eigen_assert(m_coeffs.w()==Scalar(0)); return m_coeffs.sum(); } inline Scalar squaredNorm() const { eigen_assert(m_coeffs.w()==Scalar(0)); return m_coeffs.squaredNorm(); } inline Scalar norm() const { using std::sqrt; return sqrt(squaredNorm()); } inline AlignedVector3 cross(const AlignedVector3& other) const { return AlignedVector3(m_coeffs.cross3(other.m_coeffs)); } template inline bool isApprox(const MatrixBase& 
other, const RealScalar& eps=NumTraits::dummy_precision()) const { return m_coeffs.template head<3>().isApprox(other,eps); } CoeffType& coeffs() { return m_coeffs; } const CoeffType& coeffs() const { return m_coeffs; } }; namespace internal { template struct eval, Dense> { typedef const AlignedVector3<_Scalar>& type; }; template struct evaluator > : evaluator > { typedef AlignedVector3 XprType; typedef evaluator > Base; evaluator(const XprType &m) : Base(m.coeffs()) {} }; } //@} } #endif // EIGEN_ALIGNED_VECTOR3 RcppEigen/inst/include/unsupported/Eigen/SpecialFunctions0000644000176200001440000000304613100733213023335 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPECIALFUNCTIONS_MODULE #define EIGEN_SPECIALFUNCTIONS_MODULE #include #include "../../Eigen/Core" #include "../../Eigen/src/Core/util/DisableStupidWarnings.h" namespace Eigen { /** * \defgroup SpecialFunctions_Module Special math functions module * * This module features additional coefficient-wise math functions available * within the numext:: namespace for the scalar version, and as method and/or free * functions of Array. Those include: * * - erf * - erfc * - lgamma * - igamma * - igammac * - digamma * - polygamma * - zeta * - betainc * * \code * #include * \endcode */ //@{ } #include "src/SpecialFunctions/SpecialFunctionsImpl.h" #include "src/SpecialFunctions/SpecialFunctionsPacketMath.h" #include "src/SpecialFunctions/SpecialFunctionsHalf.h" #include "src/SpecialFunctions/SpecialFunctionsFunctors.h" #include "src/SpecialFunctions/SpecialFunctionsArrayAPI.h" #if defined EIGEN_VECTORIZE_CUDA #include "src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h" #endif namespace Eigen { //@} } #include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SPECIALFUNCTIONS_MODULE RcppEigen/inst/include/unsupported/Eigen/CXX11/0000755000176200001440000000000013100733213020742 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/CXX11/Tensor0000644000176200001440000001021713100733213022140 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner // Copyright (C) 2013 Christian Seiler // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. //#ifndef EIGEN_CXX11_TENSOR_MODULE //#define EIGEN_CXX11_TENSOR_MODULE #include "../../../Eigen/Core" #ifdef EIGEN_USE_SYCL #undef min #undef max #undef isnan #undef isinf #undef isfinite #include #include #include #include #endif #include #include "../SpecialFunctions" #include "src/util/CXX11Meta.h" #include "src/util/MaxSizeVector.h" /** \defgroup CXX11_Tensor_Module Tensor Module * * This module provides a Tensor class for storing arbitrarily indexed * objects. 
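 *
 * A minimal sketch, assuming the Tensor class and its coefficient accessors declared
 * in the headers included below:
 * \code
 * #include <unsupported/Eigen/CXX11/Tensor>
 *
 * Eigen::Tensor<float, 3> t(2, 3, 4);   // a rank-3 tensor with dimensions 2 x 3 x 4
 * t.setRandom();
 * t(0, 1, 2) = 7.0f;                    // coefficient access
 * Eigen::Tensor<float, 3> u = t + t;    // coefficient-wise expression, evaluated on assignment
 * \endcode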
* * \code * #include * \endcode */ #include #include #include #ifdef _WIN32 typedef __int16 int16_t; typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; #else #include #endif #if __cplusplus > 199711 || EIGEN_COMP_MSVC >= 1900 #include #endif #ifdef _WIN32 #include #elif defined(__APPLE__) #include #else #include #endif #ifdef EIGEN_USE_THREADS #include "ThreadPool" #endif #ifdef EIGEN_USE_GPU #include #include #if __cplusplus >= 201103L #include #include #endif #endif #include "src/Tensor/TensorMacros.h" #include "src/Tensor/TensorForwardDeclarations.h" #include "src/Tensor/TensorMeta.h" #include "src/Tensor/TensorFunctors.h" #include "src/Tensor/TensorCostModel.h" #include "src/Tensor/TensorDeviceDefault.h" #include "src/Tensor/TensorDeviceThreadPool.h" #include "src/Tensor/TensorDeviceCuda.h" #include "src/Tensor/TensorDeviceSycl.h" #include "src/Tensor/TensorIndexList.h" #include "src/Tensor/TensorDimensionList.h" #include "src/Tensor/TensorDimensions.h" #include "src/Tensor/TensorInitializer.h" #include "src/Tensor/TensorTraits.h" #include "src/Tensor/TensorRandom.h" #include "src/Tensor/TensorUInt128.h" #include "src/Tensor/TensorIntDiv.h" #include "src/Tensor/TensorGlobalFunctions.h" #include "src/Tensor/TensorBase.h" #include "src/Tensor/TensorEvaluator.h" #include "src/Tensor/TensorExpr.h" #include "src/Tensor/TensorReduction.h" #include "src/Tensor/TensorReductionCuda.h" #include "src/Tensor/TensorArgMax.h" #include "src/Tensor/TensorConcatenation.h" #include "src/Tensor/TensorContractionMapper.h" #include "src/Tensor/TensorContractionBlocking.h" #include "src/Tensor/TensorContraction.h" #include "src/Tensor/TensorContractionThreadPool.h" #include "src/Tensor/TensorContractionCuda.h" #include "src/Tensor/TensorConversion.h" #include "src/Tensor/TensorConvolution.h" #include "src/Tensor/TensorFFT.h" #include "src/Tensor/TensorPatch.h" #include "src/Tensor/TensorImagePatch.h" #include "src/Tensor/TensorVolumePatch.h" #include "src/Tensor/TensorBroadcasting.h" #include "src/Tensor/TensorChipping.h" #include "src/Tensor/TensorInflation.h" #include "src/Tensor/TensorLayoutSwap.h" #include "src/Tensor/TensorMorphing.h" #include "src/Tensor/TensorPadding.h" #include "src/Tensor/TensorReverse.h" #include "src/Tensor/TensorShuffling.h" #include "src/Tensor/TensorStriding.h" #include "src/Tensor/TensorCustomOp.h" #include "src/Tensor/TensorEvalTo.h" #include "src/Tensor/TensorForcedEval.h" #include "src/Tensor/TensorGenerator.h" #include "src/Tensor/TensorAssign.h" #include "src/Tensor/TensorScan.h" #include "src/Tensor/TensorSycl.h" #include "src/Tensor/TensorExecutor.h" #include "src/Tensor/TensorDevice.h" #include "src/Tensor/TensorStorage.h" #include "src/Tensor/Tensor.h" #include "src/Tensor/TensorFixedSize.h" #include "src/Tensor/TensorMap.h" #include "src/Tensor/TensorRef.h" #include "src/Tensor/TensorIO.h" #include //#endif // EIGEN_CXX11_TENSOR_MODULE RcppEigen/inst/include/unsupported/Eigen/CXX11/src/0000755000176200001440000000000013100733213021531 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/CXX11/src/Tensor/0000755000176200001440000000000013235710577023024 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h0000644000176200001440000001455413100733213026372 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2014 Benoit Steiner // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H #define EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H namespace Eigen { /** \class TensorForcedEval * \ingroup CXX11_Tensor_Module * * \brief Tensor reshaping class. * * */ /// template class MakePointer_ is added to convert the host pointer to the device pointer. /// It is added due to the fact that for our device compiler T* is not allowed. /// If we wanted to use the same Evaluator functions we have to convert that type to our pointer T. /// This is done through our MakePointer_ class. By default the Type in the MakePointer_ is T* . /// Therefore, by adding the default value, we managed to convert the type and it does not break any /// existing code as its default value is T*. namespace internal { template class MakePointer_> struct traits > { // Type promotion to handle the case where the types of the lhs and the rhs are different. typedef typename XprType::Scalar Scalar; typedef traits XprTraits; typedef typename traits::StorageKind StorageKind; typedef typename traits::Index Index; typedef typename XprType::Nested Nested; typedef typename remove_reference::type _Nested; static const int NumDimensions = XprTraits::NumDimensions; static const int Layout = XprTraits::Layout; enum { Flags = 0 }; template struct MakePointer { // Intermediate typedef to workaround MSVC issue. typedef MakePointer_ MakePointerT; typedef typename MakePointerT::Type Type; }; }; template class MakePointer_> struct eval, Eigen::Dense> { typedef const TensorForcedEvalOp& type; }; template class MakePointer_> struct nested, 1, typename eval >::type> { typedef TensorForcedEvalOp type; }; } // end namespace internal template class MakePointer_> class TensorForcedEvalOp : public TensorBase, ReadOnlyAccessors> { public: typedef typename Eigen::internal::traits::Scalar Scalar; typedef typename Eigen::NumTraits::Real RealScalar; typedef typename internal::remove_const::type CoeffReturnType; typedef typename Eigen::internal::nested::type Nested; typedef typename Eigen::internal::traits::StorageKind StorageKind; typedef typename Eigen::internal::traits::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorForcedEvalOp(const XprType& expr) : m_xpr(expr) {} EIGEN_DEVICE_FUNC const typename internal::remove_all::type& expression() const { return m_xpr; } protected: typename XprType::Nested m_xpr; }; template class MakePointer_> struct TensorEvaluator, Device> { typedef TensorForcedEvalOp XprType; typedef typename ArgType::Scalar Scalar; typedef typename TensorEvaluator::Dimensions Dimensions; typedef typename XprType::Index Index; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType::type PacketReturnType; static const int PacketSize = internal::unpacket_traits::size; enum { IsAligned = true, PacketAccess = (PacketSize > 1), Layout = TensorEvaluator::Layout, RawAccess = true }; EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) /// op_ is used for sycl : m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL) { } EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) { const Index numValues = internal::array_prod(m_impl.dimensions()); 
m_buffer = (CoeffReturnType*)m_device.allocate(numValues * sizeof(CoeffReturnType)); // Should initialize the memory in case we're dealing with non POD types. if (NumTraits::RequireInitialization) { for (Index i = 0; i < numValues; ++i) { new(m_buffer+i) CoeffReturnType(); } } typedef TensorEvalToOp< const typename internal::remove_const::type > EvalTo; EvalTo evalToTmp(m_buffer, m_op); const bool PacketAccess = internal::IsVectorizable::value; internal::TensorExecutor::type, PacketAccess>::run(evalToTmp, m_device); return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { m_device.deallocate(m_buffer); m_buffer = NULL; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_buffer[index]; } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { return internal::ploadt(m_buffer + index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); } EIGEN_DEVICE_FUNC typename MakePointer::Type data() const { return m_buffer; } /// required by sycl in order to extract the sycl accessor const TensorEvaluator& impl() { return m_impl; } /// used by sycl in order to build the sycl buffer const Device& device() const{return m_device;} private: TensorEvaluator m_impl; const ArgType m_op; const Device& m_device; typename MakePointer::Type m_buffer; }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H RcppEigen/inst/include/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h0000644000176200001440000002212213100733213025566 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Benoit Steiner // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H #define EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H namespace Eigen { namespace internal { namespace { EIGEN_DEVICE_FUNC uint64_t get_random_seed() { #ifdef __CUDA_ARCH__ // We don't support 3d kernels since we currently only use 1 and // 2d kernels. assert(threadIdx.z == 0); return clock64() + blockIdx.x * blockDim.x + threadIdx.x + gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y); #elif defined _WIN32 // Use the current time as a baseline. SYSTEMTIME st; GetSystemTime(&st); int time = st.wSecond + 1000 * st.wMilliseconds; // Mix in a random number to make sure that we get different seeds if // we try to generate seeds faster than the clock resolution. // We need 2 random values since the generator only generate 16 bits at // a time (https://msdn.microsoft.com/en-us/library/398ax69y.aspx) int rnd1 = ::rand(); int rnd2 = ::rand(); uint64_t rnd = (rnd1 | rnd2 << 16) ^ time; return rnd; #elif defined __APPLE__ // Same approach as for win32, except that the random number generator // is better (// https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man3/random.3.html#//apple_ref/doc/man/3/random). uint64_t rnd = ::random() ^ mach_absolute_time(); return rnd; #else // Augment the current time with pseudo random number generation // to ensure that we get different seeds if we try to generate seeds // faster than the clock resolution. 
  // Augment the current time with pseudo random number generation
  // to ensure that we get different seeds if we try to generate seeds
  // faster than the clock resolution.
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  uint64_t rnd = ::random() ^ ts.tv_nsec;
  return rnd;
#endif
}

static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE unsigned PCG_XSH_RS_generator(uint64_t* state) {
  // TODO: Unify with the implementation in the non blocking thread pool.
  uint64_t current = *state;
  // Update the internal state
  *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
  // Generate the random output (using the PCG-XSH-RS scheme)
  return static_cast<unsigned>((current ^ (current >> 22)) >> (22 + (current >> 61)));
}

static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE uint64_t PCG_XSH_RS_state(uint64_t seed) {
  seed = seed ? seed : get_random_seed();
  return seed * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
}

}  // namespace


template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
T RandomToTypeUniform(uint64_t* state) {
  unsigned rnd = PCG_XSH_RS_generator(state);
  return static_cast<T>(rnd);
}


template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Eigen::half RandomToTypeUniform<Eigen::half>(uint64_t* state) {
  Eigen::half result;
  // Generate 10 random bits for the mantissa
  unsigned rnd = PCG_XSH_RS_generator(state);
  result.x = static_cast<uint16_t>(rnd & 0x3ffu);
  // Set the exponent
  result.x |= (static_cast<uint16_t>(15) << 10);
  // Return the final result
  return result - Eigen::half(1.0f);
}


template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
float RandomToTypeUniform<float>(uint64_t* state) {
  typedef union {
    uint32_t raw;
    float fp;
  } internal;
  internal result;
  // Generate 23 random bits for the mantissa
  const unsigned rnd = PCG_XSH_RS_generator(state);
  result.raw = rnd & 0x7fffffu;
  // Set the exponent
  result.raw |= (static_cast<uint32_t>(127) << 23);
  // Return the final result
  return result.fp - 1.0f;
}

template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
double RandomToTypeUniform<double>(uint64_t* state) {
  typedef union {
    uint64_t raw;
    double dp;
  } internal;
  internal result;
  result.raw = 0;
  // Generate 52 random bits for the mantissa
  // First generate the upper 20 bits
  unsigned rnd1 = PCG_XSH_RS_generator(state) & 0xfffffu;
  // Then generate the lower 32 bits
  unsigned rnd2 = PCG_XSH_RS_generator(state);
  result.raw = (static_cast<uint64_t>(rnd1) << 32) | rnd2;
  // Set the exponent
  result.raw |= (static_cast<uint64_t>(1023) << 52);
  // Return the final result
  return result.dp - 1.0;
}

template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
std::complex<float> RandomToTypeUniform<std::complex<float> >(uint64_t* state) {
  return std::complex<float>(RandomToTypeUniform<float>(state),
                             RandomToTypeUniform<float>(state));
}
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
std::complex<double> RandomToTypeUniform<std::complex<double> >(uint64_t* state) {
  return std::complex<double>(RandomToTypeUniform<double>(state),
                              RandomToTypeUniform<double>(state));
}


template <typename T> class UniformRandomGenerator {
 public:
  static const bool PacketAccess = true;
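  // Illustration only, not part of this class: the RandomToTypeUniform<float>
  // specialisation above builds a uniform value in [0, 1) without a division.
  // It keeps 23 random mantissa bits, forces the exponent to 127 so the bit
  // pattern encodes a number in [1, 2), and subtracts 1. A standalone sketch of
  // the same trick:
  //
  //   #include <cstdint>
  //   #include <cstring>
  //   inline float bits_to_unit_float(uint32_t rnd) {
  //     uint32_t raw = (rnd & 0x7fffffu) | (static_cast<uint32_t>(127) << 23);
  //     float fp;
  //     std::memcpy(&fp, &raw, sizeof(fp));   // value is in [1, 2)
  //     return fp - 1.0f;                     // shift to [0, 1)
  //   }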
  // Uses the given "seed" if non-zero, otherwise uses a random seed.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator(
      uint64_t seed = 0) {
    m_state = PCG_XSH_RS_state(seed);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator(
      const UniformRandomGenerator& other) {
    m_state = other.m_state;
  }

  template<typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  T operator()(Index i) const {
    uint64_t local_state = m_state + i;
    T result = RandomToTypeUniform<T>(&local_state);
    m_state = local_state;
    return result;
  }

  template<typename Packet, typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Packet packetOp(Index i) const {
    const int packetSize = internal::unpacket_traits<Packet>::size;
    EIGEN_ALIGN_MAX T values[packetSize];
    uint64_t local_state = m_state + i;
    for (int j = 0; j < packetSize; ++j) {
      values[j] = RandomToTypeUniform<T>(&local_state);
    }
    m_state = local_state;
    return internal::pload<Packet>(values);
  }

 private:
  mutable uint64_t m_state;
};

template <typename Scalar>
struct functor_traits<UniformRandomGenerator<Scalar> > {
  enum {
    // Rough estimate for floating point, multiplied by ceil(sizeof(T) / sizeof(float)).
    Cost = 12 * NumTraits<Scalar>::AddCost *
           ((sizeof(Scalar) + sizeof(float) - 1) / sizeof(float)),
    PacketAccess = UniformRandomGenerator<Scalar>::PacketAccess
  };
};


template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
T RandomToTypeNormal(uint64_t* state) {
  // Use the ratio-of-uniforms method to generate numbers following a normal
  // distribution. See for example Numerical Recipes chapter 7.3.9 for the
  // details.
  T u, v, q;
  do {
    u = RandomToTypeUniform<T>(state);
    v = T(1.7156) * (RandomToTypeUniform<T>(state) - T(0.5));
    const T x = u - T(0.449871);
    const T y = numext::abs(v) + T(0.386595);
    q = x*x + y * (T(0.196)*y - T(0.25472)*x);
  } while (q > T(0.27597) &&
           (q > T(0.27846) || v*v > T(-4) * numext::log(u) * u*u));

  return v/u;
}

template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
std::complex<float> RandomToTypeNormal<std::complex<float> >(uint64_t* state) {
  return std::complex<float>(RandomToTypeNormal<float>(state),
                             RandomToTypeNormal<float>(state));
}
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
std::complex<double> RandomToTypeNormal<std::complex<double> >(uint64_t* state) {
  return std::complex<double>(RandomToTypeNormal<double>(state),
                              RandomToTypeNormal<double>(state));
}


template <typename T> class NormalRandomGenerator {
 public:
  static const bool PacketAccess = true;
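  // Illustration only, not part of this class: both generators are normally
  // reached through the Tensor API rather than instantiated directly. A hedged
  // usage sketch, assuming the public TensorBase::setRandom<Generator>() entry
  // point of the unsupported Tensor module:
  //
  //   Eigen::Tensor<float, 2> t(4, 4);
  //   t.setRandom<Eigen::internal::UniformRandomGenerator<float> >();  // uniform [0, 1)
  //   t.setRandom<Eigen::internal::NormalRandomGenerator<float> >();   // standard normal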
  // Uses the given "seed" if non-zero, otherwise uses a random seed.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator(uint64_t seed = 0) {
    m_state = PCG_XSH_RS_state(seed);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator(
      const NormalRandomGenerator& other) {
    m_state = other.m_state;
  }

  template<typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  T operator()(Index i) const {
    uint64_t local_state = m_state + i;
    T result = RandomToTypeNormal<T>(&local_state);
    m_state = local_state;
    return result;
  }

  template<typename Packet, typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Packet packetOp(Index i) const {
    const int packetSize = internal::unpacket_traits<Packet>::size;
    EIGEN_ALIGN_MAX T values[packetSize];
    uint64_t local_state = m_state + i;
    for (int j = 0; j < packetSize; ++j) {
      values[j] = RandomToTypeNormal<T>(&local_state);
    }
    m_state = local_state;
    return internal::pload<Packet>(values);
  }

 private:
  mutable uint64_t m_state;
};


template <typename Scalar>
struct functor_traits<NormalRandomGenerator<Scalar> > {
  enum {
    // On average, we need to generate about 3 random numbers
    // 15 mul, 8 add, 1.5 logs
    Cost = 3 * functor_traits<UniformRandomGenerator<Scalar> >::Cost +
           15 * NumTraits<Scalar>::AddCost + 8 * NumTraits<Scalar>::AddCost +
           3 * functor_traits<scalar_log_op<Scalar> >::Cost / 2,
    PacketAccess = NormalRandomGenerator<Scalar>::PacketAccess
  };
};


} // end namespace internal
} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H
RcppEigen/inst/include/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h0000644000176200001440000001504413100733213030231 0ustar liggesusers
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Mehdi Goli    Codeplay Software Ltd.
// Ralph Potter  Codeplay Software Ltd.
// Luke Iwanski  Codeplay Software Ltd.
// Contact: <eigen@codeplay.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

/*****************************************************************
 * TensorSyclPlaceHolderExpr.h
 *
 * \brief:
 *  This is the specialisation of the placeholder expression based on the
 *  operation type
 *
 *****************************************************************/

#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_EXPR_HPP
#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_EXPR_HPP

namespace Eigen {
namespace TensorSycl {
namespace internal {

/// \struct PlaceHolder
/// \brief PlaceHolder is used to replace the \ref TensorMap in the expression
/// tree.
/// PlaceHolder contains the order of the leaf node in the expression tree.
template <typename Scalar, size_t N>
struct PlaceHolder {
  static constexpr size_t I = N;
  typedef Scalar Type;
};
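/// Illustration only, not part of this header: a rough, hedged sketch of the
/// substitution this machinery performs, with hypothetical leaf types, assuming
/// the root is instantiated with N = (total number of leaves) - 1 so that leaf
/// indices end up numbered left to right:
///
///   original expression type:
///     TensorCwiseBinaryOp<scalar_sum_op<float>, TensorMap<A>, TensorMap<B> >
///   placeholder expression type:
///     TensorCwiseBinaryOp<scalar_sum_op<float>,
///                         PlaceHolder<TensorMap<A>, 0>,
///                         PlaceHolder<TensorMap<B>, 1> >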
/// \struct PlaceHolderExpression
/// \brief It is used to create the PlaceHolder expression. The PlaceHolder
/// expression is a copy of the expression type in which the TensorMap nodes
/// have been replaced with PlaceHolder.
template <typename Expr, size_t N>
struct PlaceHolderExpression;

template <size_t N, typename... Args>
struct CalculateIndex;

template <size_t N, typename Arg>
struct CalculateIndex<N, Arg> {
  typedef typename PlaceHolderExpression<Arg, N>::Type ArgType;
  typedef utility::tuple::Tuple<ArgType> ArgsTuple;
};

template <size_t N, typename Arg1, typename Arg2>
struct CalculateIndex<N, Arg1, Arg2> {
  static const size_t Arg2LeafCount = LeafCount<Arg2>::Count;
  typedef typename PlaceHolderExpression<Arg1, N - Arg2LeafCount>::Type Arg1Type;
  typedef typename PlaceHolderExpression<Arg2, N>::Type Arg2Type;
  typedef utility::tuple::Tuple<Arg1Type, Arg2Type> ArgsTuple;
};

template <size_t N, typename Arg1, typename Arg2, typename Arg3>
struct CalculateIndex<N, Arg1, Arg2, Arg3> {
  static const size_t Arg3LeafCount = LeafCount<Arg3>::Count;
  static const size_t Arg2LeafCount = LeafCount<Arg2>::Count;
  typedef typename PlaceHolderExpression<Arg1, N - Arg3LeafCount - Arg2LeafCount>::Type Arg1Type;
  typedef typename PlaceHolderExpression<Arg2, N - Arg3LeafCount>::Type Arg2Type;
  typedef typename PlaceHolderExpression<Arg3, N>::Type Arg3Type;
  typedef utility::tuple::Tuple<Arg1Type, Arg2Type, Arg3Type> ArgsTuple;
};

template <template <class...> class Category, class OP, class TPL>
struct CategoryHelper;

template <template <class...> class Category, class OP, class... T>
struct CategoryHelper<Category, OP, utility::tuple::Tuple<T...> > {
  typedef Category<OP, T...> Type;
};

template <template <class...> class Category, class... T>
struct CategoryHelper<Category, NoOP, utility::tuple::Tuple<T...> > {
  typedef Category<T...> Type;
};

/// specialisation of the \ref PlaceHolderExpression when the node is
/// TensorCwiseNullaryOp, TensorCwiseUnaryOp, TensorBroadcastingOp, TensorCwiseBinaryOp, TensorCwiseTernaryOp
#define OPEXPRCATEGORY(CVQual)\
template